/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "sdma0/sdma0_4_2_offset.h"
#include "sdma0/sdma0_4_2_sh_mask.h"
#include "sdma1/sdma1_4_2_offset.h"
#include "sdma1/sdma1_4_2_sh_mask.h"
#include "sdma2/sdma2_4_2_2_offset.h"
#include "sdma2/sdma2_4_2_2_sh_mask.h"
#include "sdma3/sdma3_4_2_2_offset.h"
#include "sdma3/sdma3_4_2_2_sh_mask.h"
#include "sdma4/sdma4_4_2_2_offset.h"
#include "sdma4/sdma4_4_2_2_sh_mask.h"
#include "sdma5/sdma5_4_2_2_offset.h"
#include "sdma5/sdma5_4_2_2_sh_mask.h"
#include "sdma6/sdma6_4_2_2_offset.h"
#include "sdma6/sdma6_4_2_2_sh_mask.h"
#include "sdma7/sdma7_4_2_2_offset.h"
#include "sdma7/sdma7_4_2_2_sh_mask.h"
#include "sdma0/sdma0_4_1_default.h"

#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"

#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"

#include "amdgpu_ras.h"
#include "sdma_v4_4.h"

MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin");

#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK  0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L

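/*
 * Convenience wrappers for per-instance SDMA register access. Note that
 * both macros rely on a local variable named 'adev'
 * (struct amdgpu_device *) being in scope at the call site, e.g.
 * (illustrative only):
 *
 *	u32 cntl = RREG32_SDMA(0, mmSDMA0_GFX_RB_CNTL);
 *	WREG32_SDMA(0, mmSDMA0_GFX_RB_CNTL, cntl);
 */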
#define WREG32_SDMA(instance, offset, value) \
        WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
#define RREG32_SDMA(instance, offset) \
        RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))

static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev);

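/*
 * "Golden" register settings: (mask, value) pairs applied during hw init
 * to override the hardware defaults for a given ASIC revision.
 */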
static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};

static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
};

static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RD_BURST_CNTL, 0x0000000f, 0x00000003),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RD_BURST_CNTL, 0x0000000f, 0x00000003),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_rv1[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
};

static const struct soc15_reg_golden golden_settings_sdma_rv2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00003001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001)
};

static const struct soc15_reg_golden golden_settings_sdma_arct[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};

static const struct soc15_reg_golden golden_settings_sdma_aldebaran[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};

static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003fff07, 0x40000051),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003e0),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
};

static const struct soc15_ras_field_entry sdma_v4_0_ras_fields[] = {
        { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UCODE_BUF_SED),
        0, 0,
        },
        { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_RB_CMD_BUF_SED),
        0, 0,
        },
        { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_IB_CMD_BUF_SED),
        0, 0,
        },
        { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RD_FIFO_SED),
        0, 0,
        },
        { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_UTCL1_RDBST_FIFO_SED),
        0, 0,
        },
        { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_DATA_LUT_FIFO_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
        0, 0,
        },
        { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
        0, 0,
        },
        { "SDMA_SPLIT_DAT_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_SPLIT_DAT_BUF_SED),
        0, 0,
        },
        { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER),
        SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MC_WR_ADDR_FIFO_SED),
        0, 0,
        },
};

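/*
 * Translate a per-instance SDMA register offset into the absolute register
 * offset for the given engine instance. Instances 0 and 1 live in segment 0
 * of their IP blocks, while instances 2-7 (present on Arcturus) use
 * segment 1.
 */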
static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
                u32 instance, u32 offset)
{
        switch (instance) {
        case 0:
                return (adev->reg_offset[SDMA0_HWIP][0][0] + offset);
        case 1:
                return (adev->reg_offset[SDMA1_HWIP][0][0] + offset);
        case 2:
                return (adev->reg_offset[SDMA2_HWIP][0][1] + offset);
        case 3:
                return (adev->reg_offset[SDMA3_HWIP][0][1] + offset);
        case 4:
                return (adev->reg_offset[SDMA4_HWIP][0][1] + offset);
        case 5:
                return (adev->reg_offset[SDMA5_HWIP][0][1] + offset);
        case 6:
                return (adev->reg_offset[SDMA6_HWIP][0][1] + offset);
        case 7:
                return (adev->reg_offset[SDMA7_HWIP][0][1] + offset);
        default:
                break;
        }
        return 0;
}

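/*
 * Map an SDMA engine number to its interrupt-handler (IH) client id,
 * or -EINVAL for an out-of-range engine.
 */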
static unsigned sdma_v4_0_seq_to_irq_id(int seq_num)
{
        switch (seq_num) {
        case 0:
                return SOC15_IH_CLIENTID_SDMA0;
        case 1:
                return SOC15_IH_CLIENTID_SDMA1;
        case 2:
                return SOC15_IH_CLIENTID_SDMA2;
        case 3:
                return SOC15_IH_CLIENTID_SDMA3;
        case 4:
                return SOC15_IH_CLIENTID_SDMA4;
        case 5:
                return SOC15_IH_CLIENTID_SDMA5;
        case 6:
                return SOC15_IH_CLIENTID_SDMA6;
        case 7:
                return SOC15_IH_CLIENTID_SDMA7;
        default:
                break;
        }
        return -EINVAL;
}

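/*
 * Inverse of sdma_v4_0_seq_to_irq_id(): map an IH client id back to an
 * SDMA engine number, or -EINVAL for an unknown client.
 */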
static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
{
        switch (client_id) {
        case SOC15_IH_CLIENTID_SDMA0:
                return 0;
        case SOC15_IH_CLIENTID_SDMA1:
                return 1;
        case SOC15_IH_CLIENTID_SDMA2:
                return 2;
        case SOC15_IH_CLIENTID_SDMA3:
                return 3;
        case SOC15_IH_CLIENTID_SDMA4:
                return 4;
        case SOC15_IH_CLIENTID_SDMA5:
                return 5;
        case SOC15_IH_CLIENTID_SDMA6:
                return 6;
        case SOC15_IH_CLIENTID_SDMA7:
                return 7;
        default:
                break;
        }
        return -EINVAL;
}

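/*
 * Program the golden register settings that match the detected SDMA IP
 * version (and, on Raven-class APUs, the specific APU flavor).
 */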
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->ip_versions[SDMA0_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4,
                                                ARRAY_SIZE(golden_settings_sdma_4));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_vg10,
                                                ARRAY_SIZE(golden_settings_sdma_vg10));
                break;
        case IP_VERSION(4, 0, 1):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4,
                                                ARRAY_SIZE(golden_settings_sdma_4));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_vg12,
                                                ARRAY_SIZE(golden_settings_sdma_vg12));
                break;
        case IP_VERSION(4, 2, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma0_4_2_init,
                                                ARRAY_SIZE(golden_settings_sdma0_4_2_init));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma0_4_2,
                                                ARRAY_SIZE(golden_settings_sdma0_4_2));
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma1_4_2,
                                                ARRAY_SIZE(golden_settings_sdma1_4_2));
                break;
        case IP_VERSION(4, 2, 2):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_arct,
                                                ARRAY_SIZE(golden_settings_sdma_arct));
                break;
        case IP_VERSION(4, 4, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_aldebaran,
                                                ARRAY_SIZE(golden_settings_sdma_aldebaran));
                break;
        case IP_VERSION(4, 1, 0):
        case IP_VERSION(4, 1, 1):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4_1,
                                                ARRAY_SIZE(golden_settings_sdma_4_1));
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_sdma_rv2,
                                                        ARRAY_SIZE(golden_settings_sdma_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_sdma_rv1,
                                                        ARRAY_SIZE(golden_settings_sdma_rv1));
                break;
        case IP_VERSION(4, 1, 2):
                soc15_program_register_sequence(adev,
                                                golden_settings_sdma_4_3,
                                                ARRAY_SIZE(golden_settings_sdma_4_3));
                break;
        default:
                break;
        }
}

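/**
 * sdma_v4_0_setup_ulv - configure the ULV hysteresis setting
 *
 * @adev: amdgpu_device pointer
 *
 * Clear the ULV hysteresis on the server SKUs of Vega10 (device id 0x6860)
 * and Vega20 (device id 0x66a1); all other parts keep the default value.
 */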
static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
{
        int i;

        /*
         * The only chips with SDMAv4 and ULV are VG10 and VG20.
         * Server SKUs take a different hysteresis setting from other SKUs.
         */
        switch (adev->ip_versions[SDMA0_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
                if (adev->pdev->device == 0x6860)
                        break;
                return;
        case IP_VERSION(4, 2, 0):
                if (adev->pdev->device == 0x66a1)
                        break;
                return;
        default:
                return;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                uint32_t temp;

                temp = RREG32_SDMA(i, mmSDMA0_ULV_CNTL);
                temp = REG_SET_FIELD(temp, SDMA0_ULV_CNTL, HYSTERESIS, 0x0);
                WREG32_SDMA(i, mmSDMA0_ULV_CNTL, temp);
        }
}

/**
 * sdma_v4_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */

// emulation only, won't work on real chips
// real vega10 chips need to use PSP to load the firmware
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int ret, i;

        DRM_DEBUG("\n");

        switch (adev->ip_versions[SDMA0_HWIP][0]) {
        case IP_VERSION(4, 0, 0):
                chip_name = "vega10";
                break;
        case IP_VERSION(4, 0, 1):
                chip_name = "vega12";
                break;
        case IP_VERSION(4, 2, 0):
                chip_name = "vega20";
                break;
        case IP_VERSION(4, 1, 0):
        case IP_VERSION(4, 1, 1):
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        chip_name = "raven2";
                else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        chip_name = "picasso";
                else
                        chip_name = "raven";
                break;
        case IP_VERSION(4, 2, 2):
                chip_name = "arcturus";
                break;
        case IP_VERSION(4, 1, 2):
                if (adev->apu_flags & AMD_APU_IS_RENOIR)
                        chip_name = "renoir";
                else
                        chip_name = "green_sardine";
                break;
        case IP_VERSION(4, 4, 0):
                chip_name = "aldebaran";
                break;
        default:
                BUG();
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                if (i == 0)
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
                else
                        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
                if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
                    adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
                        /* Arcturus & Aldebaran will leverage the same FW memory
                         * for every SDMA instance
                         */
                        ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
                        break;
                } else {
                        ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
                        if (ret)
                                return ret;
                }
        }

        return ret;
}

/**
 * sdma_v4_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        u64 *rptr;

        /* XXX check if swapping is necessary on BE */
        rptr = ((u64 *)ring->rptr_cpu_addr);

        DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
        return ((*rptr) >> 2);
}

/**
 * sdma_v4_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u64 wptr;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
                DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
                wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
                wptr = wptr << 32;
                wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
                DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
                                ring->me, wptr);
        }

        return wptr >> 2;
}

/**
 * sdma_v4_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        DRM_DEBUG("Setting write pointer\n");
        if (ring->use_doorbell) {
                u64 *wb = (u64 *)ring->wptr_cpu_addr;

                DRM_DEBUG("Using doorbell -- "
                                "wptr_offs == 0x%08x "
                                "lower_32_bits(ring->wptr << 2) == 0x%08x "
                                "upper_32_bits(ring->wptr << 2) == 0x%08x\n",
                                ring->wptr_offs,
                                lower_32_bits(ring->wptr << 2),
                                upper_32_bits(ring->wptr << 2));
                /* XXX check if swapping is necessary on BE */
                WRITE_ONCE(*wb, (ring->wptr << 2));
                DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
                                ring->doorbell_index, ring->wptr << 2);
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
                DRM_DEBUG("Not using doorbell -- "
                                "mmSDMA%i_GFX_RB_WPTR == 0x%08x "
                                "mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
                                ring->me,
                                lower_32_bits(ring->wptr << 2),
                                ring->me,
                                upper_32_bits(ring->wptr << 2));
                WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
                            lower_32_bits(ring->wptr << 2));
                WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
                            upper_32_bits(ring->wptr << 2));
        }
}

/**
 * sdma_v4_0_page_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VEGA10+).
 */
static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u64 wptr;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
        } else {
                wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
                wptr = wptr << 32;
                wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
        }

        return wptr >> 2;
}

/**
 * sdma_v4_0_page_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VEGA10+).
 */
static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                u64 *wb = (u64 *)ring->wptr_cpu_addr;

                /* XXX check if swapping is necessary on BE */
                WRITE_ONCE(*wb, (ring->wptr << 2));
                WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
        } else {
                uint64_t wptr = ring->wptr << 2;

                WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
                            lower_32_bits(wptr));
                WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
                            upper_32_bits(wptr));
        }
}

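/**
 * sdma_v4_0_ring_insert_nop - insert NOP padding in the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * Pad the ring with @count NOP dwords. When the engine supports burst NOPs,
 * the first dword is a burst NOP header that covers the remaining
 * @count - 1 padding dwords.
 */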
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
        int i;

        for (i = 0; i < count; i++)
                if (sdma && sdma->burst_nop && (i == 0))
                        amdgpu_ring_write(ring, ring->funcs->nop |
                                SDMA_PKT_NOP_HEADER_COUNT(count - 1));
                else
                        amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v4_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VEGA10).
 */
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
                                   struct amdgpu_job *job,
                                   struct amdgpu_ib *ib,
                                   uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        /* An IB packet must end on an 8 DW boundary */
        sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
                          SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
        /* base must be 32 byte aligned */
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
}

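/*
 * Emit an SDMA POLL_REGMEM packet that waits until the polled location,
 * either a memory address (mem_space != 0, addr0/addr1 = lo/hi) or a pair
 * of registers, satisfies (value & mask) == ref; 'hdp' requests an HDP
 * flush and 'inv' sets the poll interval.
 */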
static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring,
                                   int mem_space, int hdp,
                                   uint32_t addr0, uint32_t addr1,
                                   uint32_t ref, uint32_t mask,
                                   uint32_t inv)
{
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                          SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
                          SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
                          SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
        if (mem_space) {
                /* memory */
                amdgpu_ring_write(ring, addr0);
                amdgpu_ring_write(ring, addr1);
        } else {
                /* registers */
                amdgpu_ring_write(ring, addr0 << 2);
                amdgpu_ring_write(ring, addr1 << 2);
        }
        amdgpu_ring_write(ring, ref); /* reference */
        amdgpu_ring_write(ring, mask); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
}

/**
 * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask = 0;
        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

        ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;

        sdma_v4_0_wait_reg_mem(ring, 0, 1,
                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                               ref_and_mask, ref_and_mask, 10);
}

/**
 * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VEGA10).
 */
static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                      unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        /* write the fence */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
        /* zero in first two bits */
        BUG_ON(addr & 0x3);
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        /* optionally write high bits as well */
        if (write64bit) {
                addr += 4;
                amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
                /* zero in first two bits */
                BUG_ON(addr & 0x3);
                amdgpu_ring_write(ring, lower_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(addr));
                amdgpu_ring_write(ring, upper_32_bits(seq));
        }

        /* generate an interrupt */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
        amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v4_0_gfx_enable - enable the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable SDMA RB/IB
 *
 * Control the gfx async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
{
        u32 rb_cntl, ib_cntl;
        int i;

        amdgpu_sdma_unset_buffer_funcs_helper(adev);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
        }
}

/**
 * sdma_v4_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VEGA10).
 */
static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
{
        /* XXX todo */
}

/**
 * sdma_v4_0_page_stop - stop the page async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the page async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
{
        u32 rb_cntl, ib_cntl;
        int i;

        amdgpu_sdma_unset_buffer_funcs_helper(adev);

        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
                                        RB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
                                        IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
        }
}

/**
 * sdma_v4_0_ctx_switch_enable - enable/disable the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VEGA10).
 */
static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl, phase_quantum = 0;
        int i;

        if (amdgpu_sdma_phase_quantum) {
                unsigned value = amdgpu_sdma_phase_quantum;
                unsigned unit = 0;

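                /*
                 * Encode the requested quantum (in clock cycles) into the
                 * PHASE_QUANTUM value/unit fields: halve the value (rounding
                 * up) and bump the unit exponent until it fits, clamping
                 * both fields at their maximums.
                 */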
                while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
                                SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
                        value = (value + 1) >> 1;
                        unit++;
                }
                if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
                            SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
                        value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
                                 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
                        unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
                                SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
                        WARN_ONCE(1,
                        "clamping sdma_phase_quantum to %uK clock cycles\n",
                                  value << unit);
                }
                phase_quantum =
                        value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
                        unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
                                AUTO_CTXSW_ENABLE, enable ? 1 : 0);
                if (enable && amdgpu_sdma_phase_quantum) {
                        WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
                        WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
                        WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
                }
                WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);

                /*
                 * Enable SDMA utilization. It's only supported on
                 * Arcturus for the moment and firmware version 14
                 * and above.
                 */
                if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) &&
                    adev->sdma.instance[i].fw_version >= 14)
                        WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
                /* Extend page fault timeout to avoid interrupt storm */
                WREG32_SDMA(i, mmSDMA0_UTCL1_TIMEOUT, 0x00800080);
        }
}

/**
 * sdma_v4_0_enable - enable/disable the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VEGA10).
 */
static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
{
        u32 f32_cntl;
        int i;

        if (!enable) {
                sdma_v4_0_gfx_enable(adev, enable);
                sdma_v4_0_rlc_stop(adev);
                if (adev->sdma.has_page_queue)
                        sdma_v4_0_page_stop(adev);
        }

        for (i = 0; i < adev->sdma.num_instances; i++) {
                f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
                f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
                WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
        }
}

/*
 * sdma_v4_0_rb_cntl - get parameters for rb_cntl
 */
static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{
        /* Set ring buffer size in dwords */
        uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);

        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
        return rb_cntl;
}

/**
 * sdma_v4_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the gfx DMA ring buffers and enable them (VEGA10).
 */
static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{
        struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
        u32 rb_cntl, ib_cntl, wptr_poll_cntl;
        u32 doorbell;
        u32 doorbell_offset;
        u64 wptr_gpu_addr;

        rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
        rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);

        /* Initialize the ring buffer's read and write pointers */
        WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);

        /* set the wb address whether it's enabled or not */
        WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
               upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
               lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                RPTR_WRITEBACK_ENABLE, 1);

        WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);

        ring->wptr = 0;

        /* before programming wptr to a smaller value, set minor_ptr_update first */
        WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);

        doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
        doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);

        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
                                 ring->use_doorbell);
        doorbell_offset = REG_SET_FIELD(doorbell_offset,
                                        SDMA0_GFX_DOORBELL_OFFSET,
                                        OFFSET, ring->doorbell_index);
        WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
        WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);

        sdma_v4_0_ring_set_wptr(ring);

        /* set minor_ptr_update to 0 after wptr programmed */
        WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);

        /* setup the wptr shadow polling */
        wptr_gpu_addr = ring->wptr_gpu_addr;
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
                    lower_32_bits(wptr_gpu_addr));
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
                    upper_32_bits(wptr_gpu_addr));
        wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
        wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
                                       SDMA0_GFX_RB_WPTR_POLL_CNTL,
                                       F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);

        /* enable DMA RB */
        rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
        WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);

        ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
        ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
        ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
        /* enable DMA IBs */
        WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);

        ring->sched.ready = true;
}

1164 /**
1165  * sdma_v4_0_page_resume - setup and start the async dma engines
1166  *
1167  * @adev: amdgpu_device pointer
1168  * @i: instance to resume
1169  *
1170  * Set up the page DMA ring buffer for one instance and enable it
1171  * (VEGA10).
1172  */
1173 static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
1174 {
1175         struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
1176         u32 rb_cntl, ib_cntl, wptr_poll_cntl;
1177         u32 doorbell;
1178         u32 doorbell_offset;
1179         u64 wptr_gpu_addr;
1180
1181         rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
1182         rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
1183         WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
1184
1185         /* Initialize the ring buffer's read and write pointers */
1186         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
1187         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
1188         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
1189         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
1190
1191         /* set the wb address whether it's enabled or not */
1192         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
1193                upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
1194         WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
1195                lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
1196
1197         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
1198                                 RPTR_WRITEBACK_ENABLE, 1);
1199
1200         WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
1201         WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
1202
1203         ring->wptr = 0;
1204
1205         /* before programming wptr to a smaller value, minor_ptr_update must be set first */
1206         WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
1207
1208         doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
1209         doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
1210
1211         doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
1212                                  ring->use_doorbell);
1213         doorbell_offset = REG_SET_FIELD(doorbell_offset,
1214                                         SDMA0_PAGE_DOORBELL_OFFSET,
1215                                         OFFSET, ring->doorbell_index);
1216         WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
1217         WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
1218
1219         /* the paging queue doorbell range is set up in sdma_v4_0_gfx_resume */
1220         sdma_v4_0_page_ring_set_wptr(ring);
1221
1222         /* set minor_ptr_update to 0 after wptr programed */
1223         WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
1224
1225         /* setup the wptr shadow polling */
1226         wptr_gpu_addr = ring->wptr_gpu_addr;
1227         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
1228                     lower_32_bits(wptr_gpu_addr));
1229         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
1230                     upper_32_bits(wptr_gpu_addr));
1231         wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
1232         wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
1233                                        SDMA0_PAGE_RB_WPTR_POLL_CNTL,
1234                                        F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
1235         WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
1236
1237         /* enable DMA RB */
1238         rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
1239         WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
1240
1241         ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
1242         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
1243 #ifdef __BIG_ENDIAN
1244         ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
1245 #endif
1246         /* enable DMA IBs */
1247         WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
1248
1249         ring->sched.ready = true;
1250 }
1251
1252 static void
1253 sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable)
1254 {
1255         uint32_t def, data;
1256
1257         if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) {
1258                 /* enable idle interrupt */
1259                 def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1260                 data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1261
1262                 if (data != def)
1263                         WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1264         } else {
1265                 /* disable idle interrupt */
1266                 def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1267                 data &= ~SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1268                 if (data != def)
1269                         WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1270         }
1271 }
1272
1273 static void sdma_v4_1_init_power_gating(struct amdgpu_device *adev)
1274 {
1275         uint32_t def, data;
1276
1277         /* Enable HW based PG. */
1278         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
1279         data |= SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK;
1280         if (data != def)
1281                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1282
1283         /* enable interrupt */
1284         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL));
1285         data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK;
1286         if (data != def)
1287                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL), data);
1288
1289         /* Configure hold time to filter out invalid power on/off requests. Use the default for now */
1290         def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
1291         data &= ~SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK;
1292         data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK);
1293         /* Configure switch time for hysteresis purposes. Use the default for now */
1294         data &= ~SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK;
1295         data |= (mmSDMA0_POWER_CNTL_DEFAULT & SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK);
1296         if (data != def)
1297                 WREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), data);
1298 }
1299
1300 static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
1301 {
1302         if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
1303                 return;
1304
1305         switch (adev->ip_versions[SDMA0_HWIP][0]) {
1306         case IP_VERSION(4, 1, 0):
1307         case IP_VERSION(4, 1, 1):
1308         case IP_VERSION(4, 1, 2):
1309                 sdma_v4_1_init_power_gating(adev);
1310                 sdma_v4_1_update_power_gating(adev, true);
1311                 break;
1312         default:
1313                 break;
1314         }
1315 }
1316
1317 /**
1318  * sdma_v4_0_rlc_resume - setup and start the async dma engines
1319  *
1320  * @adev: amdgpu_device pointer
1321  *
1322  * Set up the compute DMA queues and enable them (VEGA10).
1323  * Returns 0 for success, error for failure.
1324  */
1325 static int sdma_v4_0_rlc_resume(struct amdgpu_device *adev)
1326 {
1327         sdma_v4_0_init_pg(adev);
1328
1329         return 0;
1330 }
1331
1332 /**
1333  * sdma_v4_0_load_microcode - load the sDMA ME ucode
1334  *
1335  * @adev: amdgpu_device pointer
1336  *
1337  * Loads the sDMA0/1 ucode.
1338  * Returns 0 for success, -EINVAL if the ucode is not available.
1339  */
1340 static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
1341 {
1342         const struct sdma_firmware_header_v1_0 *hdr;
1343         const __le32 *fw_data;
1344         u32 fw_size;
1345         int i, j;
1346
1347         /* halt the MEs */
1348         sdma_v4_0_enable(adev, false);
1349
1350         for (i = 0; i < adev->sdma.num_instances; i++) {
1351                 if (!adev->sdma.instance[i].fw)
1352                         return -EINVAL;
1353
1354                 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
1355                 amdgpu_ucode_print_sdma_hdr(&hdr->header);
1356                 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1357
1358                 fw_data = (const __le32 *)
1359                         (adev->sdma.instance[i].fw->data +
1360                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1361
1362                 WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
1363
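                /* stream the ucode image one dword at a time; UCODE_ADDR is
                 * assumed to auto-increment with each UCODE_DATA write
                 */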
1364                 for (j = 0; j < fw_size; j++)
1365                         WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
1366                                     le32_to_cpup(fw_data++));
1367
1368                 WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
1369                             adev->sdma.instance[i].fw_version);
1370         }
1371
1372         return 0;
1373 }
1374
1375 /**
1376  * sdma_v4_0_start - setup and start the async dma engines
1377  *
1378  * @adev: amdgpu_device pointer
1379  *
1380  * Set up the DMA engines and enable them (VEGA10).
1381  * Returns 0 for success, error for failure.
1382  */
1383 static int sdma_v4_0_start(struct amdgpu_device *adev)
1384 {
1385         struct amdgpu_ring *ring;
1386         int i, r = 0;
1387
1388         if (amdgpu_sriov_vf(adev)) {
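                /* under SRIOV the engines stay halted while the rings are
                 * programmed; they are re-enabled once all instances have
                 * been resumed below
                 */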
1389                 sdma_v4_0_ctx_switch_enable(adev, false);
1390                 sdma_v4_0_enable(adev, false);
1391         } else {
1392
1393                 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1394                         r = sdma_v4_0_load_microcode(adev);
1395                         if (r)
1396                                 return r;
1397                 }
1398
1399                 /* unhalt the MEs */
1400                 sdma_v4_0_enable(adev, true);
1401                 /* enable sdma ring preemption */
1402                 sdma_v4_0_ctx_switch_enable(adev, true);
1403         }
1404
1405         /* start the gfx rings and rlc compute queues */
1406         for (i = 0; i < adev->sdma.num_instances; i++) {
1407                 uint32_t temp;
1408
1409                 WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
1410                 sdma_v4_0_gfx_resume(adev, i);
1411                 if (adev->sdma.has_page_queue)
1412                         sdma_v4_0_page_resume(adev, i);
1413
1414                 /* always set the UTC L1 enable flag to 1 */
1415                 temp = RREG32_SDMA(i, mmSDMA0_CNTL);
1416                 temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
1417                 WREG32_SDMA(i, mmSDMA0_CNTL, temp);
1418
1419                 if (!amdgpu_sriov_vf(adev)) {
1420                         /* unhalt engine */
1421                         temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
1422                         temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
1423                         WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
1424                 }
1425         }
1426
1427         if (amdgpu_sriov_vf(adev)) {
1428                 sdma_v4_0_ctx_switch_enable(adev, true);
1429                 sdma_v4_0_enable(adev, true);
1430         } else {
1431                 r = sdma_v4_0_rlc_resume(adev);
1432                 if (r)
1433                         return r;
1434         }
1435
1436         for (i = 0; i < adev->sdma.num_instances; i++) {
1437                 ring = &adev->sdma.instance[i].ring;
1438
1439                 r = amdgpu_ring_test_helper(ring);
1440                 if (r)
1441                         return r;
1442
1443                 if (adev->sdma.has_page_queue) {
1444                         struct amdgpu_ring *page = &adev->sdma.instance[i].page;
1445
1446                         r = amdgpu_ring_test_helper(page);
1447                         if (r)
1448                                 return r;
1449
1450                         if (adev->mman.buffer_funcs_ring == page)
1451                                 amdgpu_ttm_set_buffer_funcs_status(adev, true);
1452                 }
1453
1454                 if (adev->mman.buffer_funcs_ring == ring)
1455                         amdgpu_ttm_set_buffer_funcs_status(adev, true);
1456         }
1457
1458         return r;
1459 }
1460
1461 /**
1462  * sdma_v4_0_ring_test_ring - simple async dma engine test
1463  *
1464  * @ring: amdgpu_ring structure holding ring information
1465  *
1466  * Test the DMA engine by using it to write a value to
1467  * memory (VEGA10).
1468  * Returns 0 for success, error for failure.
1469  */
1470 static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
1471 {
1472         struct amdgpu_device *adev = ring->adev;
1473         unsigned i;
1474         unsigned index;
1475         int r;
1476         u32 tmp;
1477         u64 gpu_addr;
1478
1479         r = amdgpu_device_wb_get(adev, &index);
1480         if (r)
1481                 return r;
1482
1483         gpu_addr = adev->wb.gpu_addr + (index * 4);
1484         tmp = 0xCAFEDEAD;
1485         adev->wb.wb[index] = cpu_to_le32(tmp);
1486
1487         r = amdgpu_ring_alloc(ring, 5);
1488         if (r)
1489                 goto error_free_wb;
1490
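        /* emit a 5-dword WRITE_LINEAR packet that stores 0xDEADBEEF at
         * gpu_addr; COUNT is 0 because the count field holds dwords minus one
         */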
1491         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1492                           SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1493         amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1494         amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1495         amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1496         amdgpu_ring_write(ring, 0xDEADBEEF);
1497         amdgpu_ring_commit(ring);
1498
1499         for (i = 0; i < adev->usec_timeout; i++) {
1500                 tmp = le32_to_cpu(adev->wb.wb[index]);
1501                 if (tmp == 0xDEADBEEF)
1502                         break;
1503                 udelay(1);
1504         }
1505
1506         if (i >= adev->usec_timeout)
1507                 r = -ETIMEDOUT;
1508
1509 error_free_wb:
1510         amdgpu_device_wb_free(adev, index);
1511         return r;
1512 }
1513
1514 /**
1515  * sdma_v4_0_ring_test_ib - test an IB on the DMA engine
1516  *
1517  * @ring: amdgpu_ring structure holding ring information
1518  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1519  *
1520  * Test a simple IB in the DMA ring (VEGA10).
1521  * Returns 0 on success, error on failure.
1522  */
1523 static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1524 {
1525         struct amdgpu_device *adev = ring->adev;
1526         struct amdgpu_ib ib;
1527         struct dma_fence *f = NULL;
1528         unsigned index;
1529         long r;
1530         u32 tmp = 0;
1531         u64 gpu_addr;
1532
1533         r = amdgpu_device_wb_get(adev, &index);
1534         if (r)
1535                 return r;
1536
1537         gpu_addr = adev->wb.gpu_addr + (index * 4);
1538         tmp = 0xCAFEDEAD;
1539         adev->wb.wb[index] = cpu_to_le32(tmp);
1540         memset(&ib, 0, sizeof(ib));
1541         r = amdgpu_ib_get(adev, NULL, 256,
1542                           AMDGPU_IB_POOL_DIRECT, &ib);
1543         if (r)
1544                 goto err0;
1545
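        /* build an 8-dword IB: a WRITE_LINEAR of 0xDEADBEEF followed by
         * NOPs to pad to the required alignment
         */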
1546         ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1547                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1548         ib.ptr[1] = lower_32_bits(gpu_addr);
1549         ib.ptr[2] = upper_32_bits(gpu_addr);
1550         ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1551         ib.ptr[4] = 0xDEADBEEF;
1552         ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1553         ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1554         ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1555         ib.length_dw = 8;
1556
1557         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1558         if (r)
1559                 goto err1;
1560
1561         r = dma_fence_wait_timeout(f, false, timeout);
1562         if (r == 0) {
1563                 r = -ETIMEDOUT;
1564                 goto err1;
1565         } else if (r < 0) {
1566                 goto err1;
1567         }
1568         tmp = le32_to_cpu(adev->wb.wb[index]);
1569         if (tmp == 0xDEADBEEF)
1570                 r = 0;
1571         else
1572                 r = -EINVAL;
1573
1574 err1:
1575         amdgpu_ib_free(adev, &ib, NULL);
1576         dma_fence_put(f);
1577 err0:
1578         amdgpu_device_wb_free(adev, index);
1579         return r;
1580 }
1581
1582
1583 /**
1584  * sdma_v4_0_vm_copy_pte - update PTEs by copying them from the GART
1585  *
1586  * @ib: indirect buffer to fill with commands
1587  * @pe: addr of the page entry
1588  * @src: src addr to copy from
1589  * @count: number of page entries to update
1590  *
1591  * Update PTEs by copying them from the GART using sDMA (VEGA10).
1592  */
1593 static void sdma_v4_0_vm_copy_pte(struct amdgpu_ib *ib,
1594                                   uint64_t pe, uint64_t src,
1595                                   unsigned count)
1596 {
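        /* each GPU page-table entry is 8 bytes */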
1597         unsigned bytes = count * 8;
1598
1599         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1600                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1601         ib->ptr[ib->length_dw++] = bytes - 1;
1602         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1603         ib->ptr[ib->length_dw++] = lower_32_bits(src);
1604         ib->ptr[ib->length_dw++] = upper_32_bits(src);
1605         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1606         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1607
1608 }
1609
1610 /**
1611  * sdma_v4_0_vm_write_pte - update PTEs by writing them manually
1612  *
1613  * @ib: indirect buffer to fill with commands
1614  * @pe: addr of the page entry
1615  * @value: value (destination address) to write into pe
1616  * @count: number of page entries to update
1617  * @incr: increase next addr by incr bytes
1618  *
1619  * Update PTEs by writing them manually using sDMA (VEGA10).
1620  */
1621 static void sdma_v4_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1622                                    uint64_t value, unsigned count,
1623                                    uint32_t incr)
1624 {
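        /* two dwords per 64-bit PTE */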
1625         unsigned ndw = count * 2;
1626
1627         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1628                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1629         ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1630         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1631         ib->ptr[ib->length_dw++] = ndw - 1;
1632         for (; ndw > 0; ndw -= 2) {
1633                 ib->ptr[ib->length_dw++] = lower_32_bits(value);
1634                 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1635                 value += incr;
1636         }
1637 }
1638
1639 /**
1640  * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
1641  *
1642  * @ib: indirect buffer to fill with commands
1643  * @pe: addr of the page entry
1644  * @addr: dst addr to write into pe
1645  * @count: number of page entries to update
1646  * @incr: increase next addr by incr bytes
1647  * @flags: access flags
1648  *
1649  * Update the page tables using sDMA (VEGA10).
1650  */
1651 static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1652                                      uint64_t pe,
1653                                      uint64_t addr, unsigned count,
1654                                      uint32_t incr, uint64_t flags)
1655 {
1656         /* for physically contiguous pages (vram) */
1657         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1658         ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1659         ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1660         ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1661         ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1662         ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1663         ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1664         ib->ptr[ib->length_dw++] = incr; /* increment size */
1665         ib->ptr[ib->length_dw++] = 0;
1666         ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1667 }
1668
1669 /**
1670  * sdma_v4_0_ring_pad_ib - pad the IB to the required number of dw
1671  *
1672  * @ring: amdgpu_ring structure holding ring information
1673  * @ib: indirect buffer to fill with padding
1674  */
1675 static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1676 {
1677         struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1678         u32 pad_count;
1679         int i;
1680
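        /* pad the IB up to a multiple of 8 dwords; when burst NOP is
         * supported, a single NOP packet covers the whole padding
         */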
1681         pad_count = (-ib->length_dw) & 7;
1682         for (i = 0; i < pad_count; i++)
1683                 if (sdma && sdma->burst_nop && (i == 0))
1684                         ib->ptr[ib->length_dw++] =
1685                                 SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1686                                 SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1687                 else
1688                         ib->ptr[ib->length_dw++] =
1689                                 SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1690 }
1691
1692
1693 /**
1694  * sdma_v4_0_ring_emit_pipeline_sync - sync the pipeline
1695  *
1696  * @ring: amdgpu_ring pointer
1697  *
1698  * Make sure all previous operations are completed (VEGA10).
1699  */
1700 static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1701 {
1702         uint32_t seq = ring->fence_drv.sync_seq;
1703         uint64_t addr = ring->fence_drv.gpu_addr;
1704
1705         /* wait for idle */
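        /* polls the fence write-back address until the last emitted
         * sync_seq value appears, i.e. all prior work has signalled
         */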
1706         sdma_v4_0_wait_reg_mem(ring, 1, 0,
1707                                addr & 0xfffffffc,
1708                                upper_32_bits(addr) & 0xffffffff,
1709                                seq, 0xffffffff, 4);
1710 }
1711
1712
1713 /**
1714  * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
1715  *
1716  * @ring: amdgpu_ring pointer
1717  * @vmid: vmid number to use
1718  * @pd_addr: address
1719  *
1720  * Update the page table base and flush the VM TLB
1721  * using sDMA (VEGA10).
1722  */
1723 static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1724                                          unsigned vmid, uint64_t pd_addr)
1725 {
1726         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1727 }
1728
1729 static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
1730                                      uint32_t reg, uint32_t val)
1731 {
1732         amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1733                           SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1734         amdgpu_ring_write(ring, reg);
1735         amdgpu_ring_write(ring, val);
1736 }
1737
1738 static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1739                                          uint32_t val, uint32_t mask)
1740 {
1741         sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
1742 }
1743
1744 static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
1745 {
1746         uint fw_version = adev->sdma.instance[0].fw_version;
1747
1748         switch (adev->ip_versions[SDMA0_HWIP][0]) {
1749         case IP_VERSION(4, 0, 0):
1750                 return fw_version >= 430;
1751         case IP_VERSION(4, 0, 1):
1752                 /*return fw_version >= 31;*/
1753                 return false;
1754         case IP_VERSION(4, 2, 0):
1755                 return fw_version >= 123;
1756         default:
1757                 return false;
1758         }
1759 }
1760
1761 static int sdma_v4_0_early_init(void *handle)
1762 {
1763         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1764         int r;
1765
1766         r = sdma_v4_0_init_microcode(adev);
1767         if (r) {
1768                 DRM_ERROR("Failed to load sdma firmware!\n");
1769                 return r;
1770         }
1771
1772         /* TODO: Page queue breaks driver reload under SRIOV */
1773         if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) &&
1774             amdgpu_sriov_vf(adev))
1775                 adev->sdma.has_page_queue = false;
1776         else if (sdma_v4_0_fw_support_paging_queue(adev))
1777                 adev->sdma.has_page_queue = true;
1778
1779         sdma_v4_0_set_ring_funcs(adev);
1780         sdma_v4_0_set_buffer_funcs(adev);
1781         sdma_v4_0_set_vm_pte_funcs(adev);
1782         sdma_v4_0_set_irq_funcs(adev);
1783         sdma_v4_0_set_ras_funcs(adev);
1784
1785         return 0;
1786 }
1787
1788 static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
1789                 void *err_data,
1790                 struct amdgpu_iv_entry *entry);
1791
1792 static int sdma_v4_0_late_init(void *handle)
1793 {
1794         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1795
1796         sdma_v4_0_setup_ulv(adev);
1797
1798         if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
1799                 if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
1800                     adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
1801                         adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
1802         }
1803
1804         return 0;
1805 }
1806
1807 static int sdma_v4_0_sw_init(void *handle)
1808 {
1809         struct amdgpu_ring *ring;
1810         int r, i;
1811         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1812
1813         /* SDMA trap event */
1814         for (i = 0; i < adev->sdma.num_instances; i++) {
1815                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1816                                       SDMA0_4_0__SRCID__SDMA_TRAP,
1817                                       &adev->sdma.trap_irq);
1818                 if (r)
1819                         return r;
1820         }
1821
1822         /* SDMA SRAM ECC event */
1823         for (i = 0; i < adev->sdma.num_instances; i++) {
1824                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1825                                       SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
1826                                       &adev->sdma.ecc_irq);
1827                 if (r)
1828                         return r;
1829         }
1830
1831         /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION events */
1832         for (i = 0; i < adev->sdma.num_instances; i++) {
1833                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1834                                       SDMA0_4_0__SRCID__SDMA_VM_HOLE,
1835                                       &adev->sdma.vm_hole_irq);
1836                 if (r)
1837                         return r;
1838
1839                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1840                                       SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
1841                                       &adev->sdma.doorbell_invalid_irq);
1842                 if (r)
1843                         return r;
1844
1845                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1846                                       SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
1847                                       &adev->sdma.pool_timeout_irq);
1848                 if (r)
1849                         return r;
1850
1851                 r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
1852                                       SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
1853                                       &adev->sdma.srbm_write_irq);
1854                 if (r)
1855                         return r;
1856         }
1857
1858         for (i = 0; i < adev->sdma.num_instances; i++) {
1859                 ring = &adev->sdma.instance[i].ring;
1860                 ring->ring_obj = NULL;
1861                 ring->use_doorbell = true;
1862
1863                 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1864                                 ring->use_doorbell ? "true" : "false");
1865
1866                 /* doorbell size is 2 dwords, get DWORD offset */
1867                 ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
1868
1869                 sprintf(ring->name, "sdma%d", i);
1870                 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1871                                      AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1872                                      AMDGPU_RING_PRIO_DEFAULT, NULL);
1873                 if (r)
1874                         return r;
1875
1876                 if (adev->sdma.has_page_queue) {
1877                         ring = &adev->sdma.instance[i].page;
1878                         ring->ring_obj = NULL;
1879                         ring->use_doorbell = true;
1880
1881                         /* the paging queue uses the same doorbell index/routing as the gfx
1882                          * queue, with a 0x400 (1024 dword) offset on the second doorbell page
1883                          */
1884                         ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
1885                         ring->doorbell_index += 0x400;
1886
1887                         sprintf(ring->name, "page%d", i);
1888                         r = amdgpu_ring_init(adev, ring, 1024,
1889                                              &adev->sdma.trap_irq,
1890                                              AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1891                                              AMDGPU_RING_PRIO_DEFAULT, NULL);
1892                         if (r)
1893                                 return r;
1894                 }
1895         }
1896
1897         return r;
1898 }
1899
1900 static int sdma_v4_0_sw_fini(void *handle)
1901 {
1902         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1903         int i;
1904
1905         for (i = 0; i < adev->sdma.num_instances; i++) {
1906                 amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1907                 if (adev->sdma.has_page_queue)
1908                         amdgpu_ring_fini(&adev->sdma.instance[i].page);
1909         }
1910
1911         if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0) ||
1912             adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
1913                 amdgpu_sdma_destroy_inst_ctx(adev, true);
1914         else
1915                 amdgpu_sdma_destroy_inst_ctx(adev, false);
1916
1917         return 0;
1918 }
1919
1920 static int sdma_v4_0_hw_init(void *handle)
1921 {
1922         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1923
1924         if (adev->flags & AMD_IS_APU)
1925                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
1926
1927         if (!amdgpu_sriov_vf(adev))
1928                 sdma_v4_0_init_golden_registers(adev);
1929
1930         return sdma_v4_0_start(adev);
1931 }
1932
1933 static int sdma_v4_0_hw_fini(void *handle)
1934 {
1935         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1936         int i;
1937
1938         if (amdgpu_sriov_vf(adev)) {
1939                 /* disable the scheduler for SDMA */
1940                 amdgpu_sdma_unset_buffer_funcs_helper(adev);
1941                 return 0;
1942         }
1943
1944         for (i = 0; i < adev->sdma.num_instances; i++) {
1945                 amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
1946                                AMDGPU_SDMA_IRQ_INSTANCE0 + i);
1947         }
1948
1949         sdma_v4_0_ctx_switch_enable(adev, false);
1950         sdma_v4_0_enable(adev, false);
1951
1952         if (adev->flags & AMD_IS_APU)
1953                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
1954
1955         return 0;
1956 }
1957
1958 static int sdma_v4_0_suspend(void *handle)
1959 {
1960         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1961
1962         /* SMU saves SDMA state for us */
1963         if (adev->in_s0ix) {
1964                 sdma_v4_0_gfx_enable(adev, false);
1965                 return 0;
1966         }
1967
1968         return sdma_v4_0_hw_fini(adev);
1969 }
1970
1971 static int sdma_v4_0_resume(void *handle)
1972 {
1973         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1974
1975         /* SMU restores SDMA state for us */
1976         if (adev->in_s0ix) {
1977                 sdma_v4_0_enable(adev, true);
1978                 sdma_v4_0_gfx_enable(adev, true);
1979                 amdgpu_ttm_set_buffer_funcs_status(adev, true);
1980                 return 0;
1981         }
1982
1983         return sdma_v4_0_hw_init(adev);
1984 }
1985
1986 static bool sdma_v4_0_is_idle(void *handle)
1987 {
1988         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1989         u32 i;
1990
1991         for (i = 0; i < adev->sdma.num_instances; i++) {
1992                 u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
1993
1994                 if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1995                         return false;
1996         }
1997
1998         return true;
1999 }
2000
2001 static int sdma_v4_0_wait_for_idle(void *handle)
2002 {
2003         unsigned i, j;
2004         u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
2005         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2006
2007         for (i = 0; i < adev->usec_timeout; i++) {
2008                 for (j = 0; j < adev->sdma.num_instances; j++) {
2009                         sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG);
2010                         if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK))
2011                                 break;
2012                 }
2013                 if (j == adev->sdma.num_instances)
2014                         return 0;
2015                 udelay(1);
2016         }
2017         return -ETIMEDOUT;
2018 }
2019
2020 static int sdma_v4_0_soft_reset(void *handle)
2021 {
2022         /* todo */
2023
2024         return 0;
2025 }
2026
2027 static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
2028                                         struct amdgpu_irq_src *source,
2029                                         unsigned type,
2030                                         enum amdgpu_interrupt_state state)
2031 {
2032         u32 sdma_cntl;
2033
2034         sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
2035         sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
2036                        state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2037         WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
2038
2039         return 0;
2040 }
2041
2042 static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
2043                                       struct amdgpu_irq_src *source,
2044                                       struct amdgpu_iv_entry *entry)
2045 {
2046         uint32_t instance;
2047
2048         DRM_DEBUG("IH: SDMA trap\n");
2049         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2050         switch (entry->ring_id) {
2051         case 0:
2052                 amdgpu_fence_process(&adev->sdma.instance[instance].ring);
2053                 break;
2054         case 1:
2055                 if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0))
2056                         amdgpu_fence_process(&adev->sdma.instance[instance].page);
2057                 break;
2058         case 2:
2059                 /* XXX compute */
2060                 break;
2061         case 3:
2062                 if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0))
2063                         amdgpu_fence_process(&adev->sdma.instance[instance].page);
2064                 break;
2065         }
2066         return 0;
2067 }
2068
2069 static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
2070                 void *err_data,
2071                 struct amdgpu_iv_entry *entry)
2072 {
2073         int instance;
2074
2075         /* When "Full RAS" is enabled, the per-IP interrupt sources should
2076          * be disabled and the driver should only look for the aggregated
2077          * interrupt via sync flood
2078          */
2079         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
2080                 goto out;
2081
2082         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2083         if (instance < 0)
2084                 goto out;
2085
2086         amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
2087
2088 out:
2089         return AMDGPU_RAS_SUCCESS;
2090 }
2091
2092 static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
2093                                               struct amdgpu_irq_src *source,
2094                                               struct amdgpu_iv_entry *entry)
2095 {
2096         int instance;
2097
2098         DRM_ERROR("Illegal instruction in SDMA command stream\n");
2099
2100         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2101         if (instance < 0)
2102                 return 0;
2103
2104         switch (entry->ring_id) {
2105         case 0:
2106                 drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
2107                 break;
2108         }
2109         return 0;
2110 }
2111
2112 static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
2113                                         struct amdgpu_irq_src *source,
2114                                         unsigned type,
2115                                         enum amdgpu_interrupt_state state)
2116 {
2117         u32 sdma_edc_config;
2118
2119         sdma_edc_config = RREG32_SDMA(type, mmSDMA0_EDC_CONFIG);
2120         sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
2121                        state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
2122         WREG32_SDMA(type, mmSDMA0_EDC_CONFIG, sdma_edc_config);
2123
2124         return 0;
2125 }
2126
2127 static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
2128                                               struct amdgpu_iv_entry *entry)
2129 {
2130         int instance;
2131         struct amdgpu_task_info task_info;
2132         u64 addr;
2133
2134         instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
2135         if (instance < 0 || instance >= adev->sdma.num_instances) {
2136                 dev_err(adev->dev, "sdma instance invalid %d\n", instance);
2137                 return -EINVAL;
2138         }
2139
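        /* reconstruct the 48-bit, page-aligned fault address: src_data[0]
         * carries bits [43:12] and the low nibble of src_data[1] bits [47:44]
         */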
2140         addr = (u64)entry->src_data[0] << 12;
2141         addr |= ((u64)entry->src_data[1] & 0xf) << 44;
2142
2143         memset(&task_info, 0, sizeof(struct amdgpu_task_info));
2144         amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
2145
2146         dev_dbg_ratelimited(adev->dev,
2147                    "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
2148                    "pasid:%u, for process %s pid %d thread %s pid %d\n",
2149                    instance, addr, entry->src_id, entry->ring_id, entry->vmid,
2150                    entry->pasid, task_info.process_name, task_info.tgid,
2151                    task_info.task_name, task_info.pid);
2152         return 0;
2153 }
2154
2155 static int sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev,
2156                                               struct amdgpu_irq_src *source,
2157                                               struct amdgpu_iv_entry *entry)
2158 {
2159         dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
2160         sdma_v4_0_print_iv_entry(adev, entry);
2161         return 0;
2162 }
2163
2164 static int sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev,
2165                                               struct amdgpu_irq_src *source,
2166                                               struct amdgpu_iv_entry *entry)
2167 {
2168         dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable != 0xff\n");
2169         sdma_v4_0_print_iv_entry(adev, entry);
2170         return 0;
2171 }
2172
2173 static int sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev,
2174                                               struct amdgpu_irq_src *source,
2175                                               struct amdgpu_iv_entry *entry)
2176 {
2177         dev_dbg_ratelimited(adev->dev,
2178                 "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
2179         sdma_v4_0_print_iv_entry(adev, entry);
2180         return 0;
2181 }
2182
2183 static int sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev,
2184                                               struct amdgpu_irq_src *source,
2185                                               struct amdgpu_iv_entry *entry)
2186 {
2187         dev_dbg_ratelimited(adev->dev,
2188                 "SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n");
2189         sdma_v4_0_print_iv_entry(adev, entry);
2190         return 0;
2191 }
2192
2193 static void sdma_v4_0_update_medium_grain_clock_gating(
2194                 struct amdgpu_device *adev,
2195                 bool enable)
2196 {
2197         uint32_t data, def;
2198         int i;
2199
2200         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
2201                 for (i = 0; i < adev->sdma.num_instances; i++) {
2202                         def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
2203                         data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
2204                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
2205                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
2206                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
2207                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
2208                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
2209                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
2210                                   SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
2211                         if (def != data)
2212                                 WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
2213                 }
2214         } else {
2215                 for (i = 0; i < adev->sdma.num_instances; i++) {
2216                         def = data = RREG32_SDMA(i, mmSDMA0_CLK_CTRL);
2217                         data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
2218                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
2219                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
2220                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
2221                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
2222                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
2223                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
2224                                  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
2225                         if (def != data)
2226                                 WREG32_SDMA(i, mmSDMA0_CLK_CTRL, data);
2227                 }
2228         }
2229 }
2230
2231
2232 static void sdma_v4_0_update_medium_grain_light_sleep(
2233                 struct amdgpu_device *adev,
2234                 bool enable)
2235 {
2236         uint32_t data, def;
2237         int i;
2238
2239         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
2240                 for (i = 0; i < adev->sdma.num_instances; i++) {
2241                         /* 1 = no override: enable sdma mem light sleep */
2242                         def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
2243                         data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
2244                         if (def != data)
2245                                 WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
2246                 }
2247         } else {
2248                 for (i = 0; i < adev->sdma.num_instances; i++) {
2249                         /* 0 = override: disable sdma mem light sleep */
2250                         def = data = RREG32_SDMA(i, mmSDMA0_POWER_CNTL);
2251                         data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
2252                         if (def != data)
2253                                 WREG32_SDMA(i, mmSDMA0_POWER_CNTL, data);
2254                 }
2255         }
2256 }
2257
2258 static int sdma_v4_0_set_clockgating_state(void *handle,
2259                                           enum amd_clockgating_state state)
2260 {
2261         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2262
2263         if (amdgpu_sriov_vf(adev))
2264                 return 0;
2265
2266         sdma_v4_0_update_medium_grain_clock_gating(adev,
2267                         state == AMD_CG_STATE_GATE);
2268         sdma_v4_0_update_medium_grain_light_sleep(adev,
2269                         state == AMD_CG_STATE_GATE);
2270         return 0;
2271 }
2272
2273 static int sdma_v4_0_set_powergating_state(void *handle,
2274                                           enum amd_powergating_state state)
2275 {
2276         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2277
2278         switch (adev->ip_versions[SDMA0_HWIP][0]) {
2279         case IP_VERSION(4, 1, 0):
2280         case IP_VERSION(4, 1, 1):
2281         case IP_VERSION(4, 1, 2):
2282                 sdma_v4_1_update_power_gating(adev,
2283                                 state == AMD_PG_STATE_GATE);
2284                 break;
2285         default:
2286                 break;
2287         }
2288
2289         return 0;
2290 }
2291
2292 static void sdma_v4_0_get_clockgating_state(void *handle, u64 *flags)
2293 {
2294         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2295         int data;
2296
2297         if (amdgpu_sriov_vf(adev))
2298                 *flags = 0;
2299
2300         /* AMD_CG_SUPPORT_SDMA_MGCG */
2301         data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL));
2302         if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK))
2303                 *flags |= AMD_CG_SUPPORT_SDMA_MGCG;
2304
2305         /* AMD_CG_SUPPORT_SDMA_LS */
2306         data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL));
2307         if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
2308                 *flags |= AMD_CG_SUPPORT_SDMA_LS;
2309 }
2310
2311 const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
2312         .name = "sdma_v4_0",
2313         .early_init = sdma_v4_0_early_init,
2314         .late_init = sdma_v4_0_late_init,
2315         .sw_init = sdma_v4_0_sw_init,
2316         .sw_fini = sdma_v4_0_sw_fini,
2317         .hw_init = sdma_v4_0_hw_init,
2318         .hw_fini = sdma_v4_0_hw_fini,
2319         .suspend = sdma_v4_0_suspend,
2320         .resume = sdma_v4_0_resume,
2321         .is_idle = sdma_v4_0_is_idle,
2322         .wait_for_idle = sdma_v4_0_wait_for_idle,
2323         .soft_reset = sdma_v4_0_soft_reset,
2324         .set_clockgating_state = sdma_v4_0_set_clockgating_state,
2325         .set_powergating_state = sdma_v4_0_set_powergating_state,
2326         .get_clockgating_state = sdma_v4_0_get_clockgating_state,
2327 };
2328
2329 static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
2330         .type = AMDGPU_RING_TYPE_SDMA,
2331         .align_mask = 0xf,
2332         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2333         .support_64bit_ptrs = true,
2334         .secure_submission_supported = true,
2335         .vmhub = AMDGPU_MMHUB_0,
2336         .get_rptr = sdma_v4_0_ring_get_rptr,
2337         .get_wptr = sdma_v4_0_ring_get_wptr,
2338         .set_wptr = sdma_v4_0_ring_set_wptr,
2339         .emit_frame_size =
2340                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2341                 3 + /* hdp invalidate */
2342                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2343                 /* sdma_v4_0_ring_emit_vm_flush */
2344                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2345                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2346                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2347         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2348         .emit_ib = sdma_v4_0_ring_emit_ib,
2349         .emit_fence = sdma_v4_0_ring_emit_fence,
2350         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2351         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2352         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2353         .test_ring = sdma_v4_0_ring_test_ring,
2354         .test_ib = sdma_v4_0_ring_test_ib,
2355         .insert_nop = sdma_v4_0_ring_insert_nop,
2356         .pad_ib = sdma_v4_0_ring_pad_ib,
2357         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2358         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2359         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2360 };
2361
2362 /*
2363  * On Arcturus, SDMA instances 5~7 have a different vmhub type (AMDGPU_MMHUB_1),
2364  * so create an individual set of constant ring_funcs for those instances.
2365  */
2366 static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = {
2367         .type = AMDGPU_RING_TYPE_SDMA,
2368         .align_mask = 0xf,
2369         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2370         .support_64bit_ptrs = true,
2371         .secure_submission_supported = true,
2372         .vmhub = AMDGPU_MMHUB_1,
2373         .get_rptr = sdma_v4_0_ring_get_rptr,
2374         .get_wptr = sdma_v4_0_ring_get_wptr,
2375         .set_wptr = sdma_v4_0_ring_set_wptr,
2376         .emit_frame_size =
2377                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2378                 3 + /* hdp invalidate */
2379                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2380                 /* sdma_v4_0_ring_emit_vm_flush */
2381                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2382                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2383                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2384         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2385         .emit_ib = sdma_v4_0_ring_emit_ib,
2386         .emit_fence = sdma_v4_0_ring_emit_fence,
2387         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2388         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2389         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2390         .test_ring = sdma_v4_0_ring_test_ring,
2391         .test_ib = sdma_v4_0_ring_test_ib,
2392         .insert_nop = sdma_v4_0_ring_insert_nop,
2393         .pad_ib = sdma_v4_0_ring_pad_ib,
2394         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2395         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2396         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2397 };
2398
2399 static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
2400         .type = AMDGPU_RING_TYPE_SDMA,
2401         .align_mask = 0xf,
2402         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2403         .support_64bit_ptrs = true,
2404         .secure_submission_supported = true,
2405         .vmhub = AMDGPU_MMHUB_0,
2406         .get_rptr = sdma_v4_0_ring_get_rptr,
2407         .get_wptr = sdma_v4_0_page_ring_get_wptr,
2408         .set_wptr = sdma_v4_0_page_ring_set_wptr,
2409         .emit_frame_size =
2410                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2411                 3 + /* hdp invalidate */
2412                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2413                 /* sdma_v4_0_ring_emit_vm_flush */
2414                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2415                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2416                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2417         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2418         .emit_ib = sdma_v4_0_ring_emit_ib,
2419         .emit_fence = sdma_v4_0_ring_emit_fence,
2420         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2421         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2422         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2423         .test_ring = sdma_v4_0_ring_test_ring,
2424         .test_ib = sdma_v4_0_ring_test_ib,
2425         .insert_nop = sdma_v4_0_ring_insert_nop,
2426         .pad_ib = sdma_v4_0_ring_pad_ib,
2427         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2428         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2429         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2430 };
2431
2432 static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = {
2433         .type = AMDGPU_RING_TYPE_SDMA,
2434         .align_mask = 0xf,
2435         .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2436         .support_64bit_ptrs = true,
2437         .secure_submission_supported = true,
2438         .vmhub = AMDGPU_MMHUB_1,
2439         .get_rptr = sdma_v4_0_ring_get_rptr,
2440         .get_wptr = sdma_v4_0_page_ring_get_wptr,
2441         .set_wptr = sdma_v4_0_page_ring_set_wptr,
2442         .emit_frame_size =
2443                 6 + /* sdma_v4_0_ring_emit_hdp_flush */
2444                 3 + /* hdp invalidate */
2445                 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
2446                 /* sdma_v4_0_ring_emit_vm_flush */
2447                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2448                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2449                 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
2450         .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
2451         .emit_ib = sdma_v4_0_ring_emit_ib,
2452         .emit_fence = sdma_v4_0_ring_emit_fence,
2453         .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
2454         .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
2455         .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
2456         .test_ring = sdma_v4_0_ring_test_ring,
2457         .test_ib = sdma_v4_0_ring_test_ib,
2458         .insert_nop = sdma_v4_0_ring_insert_nop,
2459         .pad_ib = sdma_v4_0_ring_pad_ib,
2460         .emit_wreg = sdma_v4_0_ring_emit_wreg,
2461         .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
2462         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2463 };
2464
2465 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
2466 {
2467         int i;
2468
2469         for (i = 0; i < adev->sdma.num_instances; i++) {
2470                 if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
2471                         adev->sdma.instance[i].ring.funcs =
2472                                         &sdma_v4_0_ring_funcs_2nd_mmhub;
2473                 else
2474                         adev->sdma.instance[i].ring.funcs =
2475                                         &sdma_v4_0_ring_funcs;
2476                 adev->sdma.instance[i].ring.me = i;
2477                 if (adev->sdma.has_page_queue) {
2478                         if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
2479                                 adev->sdma.instance[i].page.funcs =
2480                                         &sdma_v4_0_page_ring_funcs_2nd_mmhub;
2481                         else
2482                                 adev->sdma.instance[i].page.funcs =
2483                                         &sdma_v4_0_page_ring_funcs;
2484                         adev->sdma.instance[i].page.me = i;
2485                 }
2486         }
2487 }
2488
2489 static const struct amdgpu_irq_src_funcs sdma_v4_0_trap_irq_funcs = {
2490         .set = sdma_v4_0_set_trap_irq_state,
2491         .process = sdma_v4_0_process_trap_irq,
2492 };
2493
2494 static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
2495         .process = sdma_v4_0_process_illegal_inst_irq,
2496 };
2497
2498 static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
2499         .set = sdma_v4_0_set_ecc_irq_state,
2500         .process = amdgpu_sdma_process_ecc_irq,
2501 };
2502
2503 static const struct amdgpu_irq_src_funcs sdma_v4_0_vm_hole_irq_funcs = {
2504         .process = sdma_v4_0_process_vm_hole_irq,
2505 };
2506
2507 static const struct amdgpu_irq_src_funcs sdma_v4_0_doorbell_invalid_irq_funcs = {
2508         .process = sdma_v4_0_process_doorbell_invalid_irq,
2509 };
2510
2511 static const struct amdgpu_irq_src_funcs sdma_v4_0_pool_timeout_irq_funcs = {
2512         .process = sdma_v4_0_process_pool_timeout_irq,
2513 };
2514
2515 static const struct amdgpu_irq_src_funcs sdma_v4_0_srbm_write_irq_funcs = {
2516         .process = sdma_v4_0_process_srbm_write_irq,
2517 };
2518
2519 static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
2520 {
2521         adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
2522         adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
2523         /* For Arcturus and Aldebaran, add four more irq handlers */
2524         switch (adev->sdma.num_instances) {
2525         case 5:
2526         case 8:
2527                 adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
2528                 adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
2529                 adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
2530                 adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
2531                 break;
2532         default:
2533                 break;
2534         }
2535         adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
2536         adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
2537         adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
2538         adev->sdma.vm_hole_irq.funcs = &sdma_v4_0_vm_hole_irq_funcs;
2539         adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_0_doorbell_invalid_irq_funcs;
2540         adev->sdma.pool_timeout_irq.funcs = &sdma_v4_0_pool_timeout_irq_funcs;
2541         adev->sdma.srbm_write_irq.funcs = &sdma_v4_0_srbm_write_irq_funcs;
2542 }
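
/*
 * Usage sketch (illustrative, simplified): the sources above are wired
 * up by this file's sw_init via amdgpu_irq_add_id(); num_types set here
 * lets the IRQ core track one state per SDMA instance, roughly:
 *
 *      r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0,
 *                            SDMA0_4_0__SRCID__SDMA_TRAP,
 *                            &adev->sdma.trap_irq);
 */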
2543
2544 /**
2545  * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
2546  *
2547  * @ib: indirect buffer to copy to
2548  * @src_offset: src GPU address
2549  * @dst_offset: dst GPU address
2550  * @byte_count: number of bytes to xfer
2551  * @tmz: whether a secure copy should be used
2552  *
2553  * Copy GPU buffers using the DMA engine (VEGA10/12).
2554  * Used by the amdgpu ttm implementation to move pages if
2555  * registered as the asic copy callback.
2556  */
2557 static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
2558                                        uint64_t src_offset,
2559                                        uint64_t dst_offset,
2560                                        uint32_t byte_count,
2561                                        bool tmz)
2562 {
2563         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
2564                 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
2565                 SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
2566         ib->ptr[ib->length_dw++] = byte_count - 1;
2567         ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
2568         ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
2569         ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
2570         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2571         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2572 }
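
/*
 * Worked example (illustrative): a non-secure 4 KiB copy lays out the
 * seven dwords counted by copy_num_dw below:
 *
 *      dw0: OP=COPY, SUB_OP=COPY_LINEAR, TMZ=0
 *      dw1: 0x00000fff          (byte_count - 1)
 *      dw2: 0x00000000          (no src/dst endian swap)
 *      dw3: src_offset[31:0]
 *      dw4: src_offset[63:32]
 *      dw5: dst_offset[31:0]
 *      dw6: dst_offset[63:32]
 */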
2573
2574 /**
2575  * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
2576  *
2577  * @ib: indirect buffer to fill
2578  * @src_data: value to write to buffer
2579  * @dst_offset: dst GPU address
2580  * @byte_count: number of bytes to xfer
2581  *
2582  * Fill GPU buffers using the DMA engine (VEGA10/12).
2583  */
2584 static void sdma_v4_0_emit_fill_buffer(struct amdgpu_ib *ib,
2585                                        uint32_t src_data,
2586                                        uint64_t dst_offset,
2587                                        uint32_t byte_count)
2588 {
2589         ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
2590         ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2591         ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2592         ib->ptr[ib->length_dw++] = src_data;
2593         ib->ptr[ib->length_dw++] = byte_count - 1;
2594 }
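
/*
 * Worked example (illustrative): zero-filling 4 KiB emits the five
 * dwords counted by fill_num_dw below:
 *
 *      dw0: OP=CONST_FILL
 *      dw1: dst_offset[31:0]
 *      dw2: dst_offset[63:32]
 *      dw3: 0x00000000          (src_data)
 *      dw4: 0x00000fff          (byte_count - 1)
 */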
2595
2596 static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
2597         .copy_max_bytes = 0x400000,
2598         .copy_num_dw = 7,
2599         .emit_copy_buffer = sdma_v4_0_emit_copy_buffer,
2600
2601         .fill_max_bytes = 0x400000,
2602         .fill_num_dw = 5,
2603         .emit_fill_buffer = sdma_v4_0_emit_fill_buffer,
2604 };
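
/*
 * Note (assumption, inferred from the byte_count - 1 encoding above):
 * the 0x400000 (4 MiB) limits match the largest transfer a single
 * linear COPY/CONST_FILL packet can describe with a 22-bit count
 * field; the TTM move path splits larger requests into multiple
 * packets of at most copy_max_bytes/fill_max_bytes each.
 */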
2605
2606 static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
2607 {
2608         adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
2609         if (adev->sdma.has_page_queue)
2610                 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
2611         else
2612                 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
2613 }
2614
2615 static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
2616         .copy_pte_num_dw = 7,
2617         .copy_pte = sdma_v4_0_vm_copy_pte,
2618
2619         .write_pte = sdma_v4_0_vm_write_pte,
2620         .set_pte_pde = sdma_v4_0_vm_set_pte_pde,
2621 };
2622
2623 static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
2624 {
2625         struct drm_gpu_scheduler *sched;
2626         unsigned i;
2627
2628         adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
2629         for (i = 0; i < adev->sdma.num_instances; i++) {
2630                 if (adev->sdma.has_page_queue)
2631                         sched = &adev->sdma.instance[i].page.sched;
2632                 else
2633                         sched = &adev->sdma.instance[i].ring.sched;
2634                 adev->vm_manager.vm_pte_scheds[i] = sched;
2635         }
2636         adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
2637 }
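
/*
 * Design note (interpretation): when a page queue is present, page
 * table updates are scheduled on it instead of the main SDMA ring, so
 * PTE work does not contend with ordinary buffer moves on the same
 * queue.
 */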
2638
2639 static void sdma_v4_0_get_ras_error_count(uint32_t value,
2640                                         uint32_t instance,
2641                                         uint32_t *sec_count)
2642 {
2643         uint32_t i;
2644         uint32_t sec_cnt;
2645
2646         /* double bit (multiple bit) error detection is not supported */
2647         for (i = 0; i < ARRAY_SIZE(sdma_v4_0_ras_fields); i++) {
2648                 /* the SDMA_EDC_COUNTER register in each sdma instance
2649                  * shares the same SED (single-error detect) shift_mask
2650                  */
2651                 sec_cnt = (value &
2652                         sdma_v4_0_ras_fields[i].sec_count_mask) >>
2653                         sdma_v4_0_ras_fields[i].sec_count_shift;
2654                 if (sec_cnt) {
2655                         DRM_INFO("Detected %s in SDMA%d, SED %d\n",
2656                                 sdma_v4_0_ras_fields[i].name,
2657                                 instance, sec_cnt);
2658                         *sec_count += sec_cnt;
2659                 }
2660         }
2661 }
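
/*
 * Worked example (illustrative; the mask and shift are hypothetical):
 * if SDMA0_EDC_COUNTER reads 0x00000009 and a sdma_v4_0_ras_fields
 * entry has sec_count_mask 0x0000000c and sec_count_shift 2, then
 * sec_cnt = (0x9 & 0xc) >> 2 = 2, and two single-bit (SED) errors are
 * added to *sec_count for that sub-block.
 */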
2662
2663 static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *adev,
2664                         uint32_t instance, void *ras_error_status)
2665 {
2666         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
2667         uint32_t sec_count = 0;
2668         uint32_t reg_value = 0;
2669
2670         reg_value = RREG32_SDMA(instance, mmSDMA0_EDC_COUNTER);
2671         /* double bit error is not supported */
2672         if (reg_value)
2673                 sdma_v4_0_get_ras_error_count(reg_value,
2674                                 instance, &sec_count);
2675         /* err_data->ce_count should be initialized to 0
2676          * before calling into this function */
2677         err_data->ce_count += sec_count;
2678         /* double bit errors are not supported,
2679          * so set the ue count to 0 */
2680         err_data->ue_count = 0;
2681
2682         return 0;
2683 }
2684
2685 static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
2686 {
2687         int i = 0;
2688
2689         for (i = 0; i < adev->sdma.num_instances; i++) {
2690                 if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status)) {
2691                         dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i);
2692                         return;
2693                 }
2694         }
2695 }
2696
2697 static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
2698 {
2699         int i;
2700
2701         /* read back edc counter registers to clear the counters */
2702         if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2703                 for (i = 0; i < adev->sdma.num_instances; i++)
2704                         RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
2705         }
2706 }
2707
2708 const struct amdgpu_ras_block_hw_ops sdma_v4_0_ras_hw_ops = {
2709         .query_ras_error_count = sdma_v4_0_query_ras_error_count,
2710         .reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
2711 };
2712
2713 static struct amdgpu_sdma_ras sdma_v4_0_ras = {
2714         .ras_block = {
2715                 .hw_ops = &sdma_v4_0_ras_hw_ops,
2716                 .ras_cb = sdma_v4_0_process_ras_data_cb,
2717         },
2718 };
2719
2720 static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
2721 {
2722         switch (adev->ip_versions[SDMA0_HWIP][0]) {
2723         case IP_VERSION(4, 2, 0):
2724         case IP_VERSION(4, 2, 2):
2725                 adev->sdma.ras = &sdma_v4_0_ras;
2726                 break;
2727         case IP_VERSION(4, 4, 0):
2728                 adev->sdma.ras = &sdma_v4_4_ras;
2729                 break;
2730         default:
2731                 break;
2732         }
2733
2734         if (adev->sdma.ras) {
2735                 amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block);
2736
2737                 strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma");
2738                 adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
2739                 adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
2740                 adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm;
2741
2742                 /* If no special ras_late_init function is defined, use the default ras_late_init */
2743                 if (!adev->sdma.ras->ras_block.ras_late_init)
2744                         adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
2745
2746                 /* If no special ras_cb function is defined, use the default ras_cb */
2747                 if (!adev->sdma.ras->ras_block.ras_cb)
2748                         adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
2749         }
2750 }
2751
2752 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
2753         .type = AMD_IP_BLOCK_TYPE_SDMA,
2754         .major = 4,
2755         .minor = 0,
2756         .rev = 0,
2757         .funcs = &sdma_v4_0_ip_funcs,
2758 };