]> Git Repo - linux.git/blob - drivers/gpu/drm/amd/amdgpu/soc21.c
Merge tag 'for-6.5-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
[linux.git] / drivers / gpu / drm / amd / amdgpu / soc21.c
1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27
28 #include "amdgpu.h"
29 #include "amdgpu_atombios.h"
30 #include "amdgpu_ih.h"
31 #include "amdgpu_uvd.h"
32 #include "amdgpu_vce.h"
33 #include "amdgpu_ucode.h"
34 #include "amdgpu_psp.h"
35 #include "amdgpu_smu.h"
36 #include "atom.h"
37 #include "amd_pcie.h"
38
39 #include "gc/gc_11_0_0_offset.h"
40 #include "gc/gc_11_0_0_sh_mask.h"
41 #include "mp/mp_13_0_0_offset.h"
42
43 #include "soc15.h"
44 #include "soc15_common.h"
45 #include "soc21.h"
46 #include "mxgpu_nv.h"
47
48 static const struct amd_ip_funcs soc21_common_ip_funcs;
49
/* SOC21 */
/*
 * Video codec capability tables reported to userspace on bare-metal SOC21.
 * codec_info_build() packs (codec index, max width, max height, max level).
 * The *_vcn0 tables are used when VCN instance 0 is usable; the *_vcn1
 * tables are the reduced set selected when instance 0 is harvested — note
 * they omit AV1 (see soc21_query_video_codecs()).
 */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

/* Encode caps without AV1, used when VCN0 is harvested. */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
	.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
	.codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
};

static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

/* Decode caps without AV1, used when VCN0 is harvested. */
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
{
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
};

static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
{
	.codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
	.codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};
104
/* SRIOV SOC21, not const since data is controlled by host */
/*
 * SR-IOV variants of the codec tables. These are deliberately NOT const:
 * soc21_common_late_init() passes them to
 * amdgpu_virt_update_sriov_video_codec(), which lets the host rewrite the
 * advertised capabilities. As above, the *_vcn1 tables omit AV1.
 */
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};

static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
	.codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
};

static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = {
	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
	.codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
};

/* SR-IOV decode tables include MPEG2/MPEG4/VC1 in addition to the bare-metal set. */
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};

static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
};

static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = {
	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0),
	.codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
};

static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
	.codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1),
	.codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
};
157
158 static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
159                                  const struct amdgpu_video_codecs **codecs)
160 {
161         if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
162                 return -EINVAL;
163
164         switch (adev->ip_versions[UVD_HWIP][0]) {
165         case IP_VERSION(4, 0, 0):
166         case IP_VERSION(4, 0, 2):
167         case IP_VERSION(4, 0, 4):
168                 if (amdgpu_sriov_vf(adev)) {
169                         if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
170                         !amdgpu_sriov_is_av1_support(adev)) {
171                                 if (encode)
172                                         *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1;
173                                 else
174                                         *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1;
175                         } else {
176                                 if (encode)
177                                         *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0;
178                                 else
179                                         *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0;
180                         }
181                 } else {
182                         if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) {
183                                 if (encode)
184                                         *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
185                                 else
186                                         *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
187                         } else {
188                                 if (encode)
189                                         *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
190                                 else
191                                         *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
192                         }
193                 }
194                 return 0;
195         default:
196                 return -EINVAL;
197         }
198 }
199
200 static u32 soc21_didt_rreg(struct amdgpu_device *adev, u32 reg)
201 {
202         unsigned long flags, address, data;
203         u32 r;
204
205         address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
206         data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);
207
208         spin_lock_irqsave(&adev->didt_idx_lock, flags);
209         WREG32(address, (reg));
210         r = RREG32(data);
211         spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
212         return r;
213 }
214
215 static void soc21_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
216 {
217         unsigned long flags, address, data;
218
219         address = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_INDEX);
220         data = SOC15_REG_OFFSET(GC, 0, regDIDT_IND_DATA);
221
222         spin_lock_irqsave(&adev->didt_idx_lock, flags);
223         WREG32(address, (reg));
224         WREG32(data, (v));
225         spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
226 }
227
/* Return the configured memory size as reported by the NBIO block. */
static u32 soc21_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}
232
/* Return the reference clock (xclk) frequency from the SPLL settings. */
static u32 soc21_get_xclk(struct amdgpu_device *adev)
{
	return adev->clock.spll.reference_freq;
}
237
238
239 void soc21_grbm_select(struct amdgpu_device *adev,
240                      u32 me, u32 pipe, u32 queue, u32 vmid)
241 {
242         u32 grbm_gfx_cntl = 0;
243         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
244         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
245         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
246         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
247
248         WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, grbm_gfx_cntl);
249 }
250
/* Not implemented for SOC21 yet; always reports failure. */
static bool soc21_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}
256
/*
 * Whitelist of registers that soc21_read_register() will service.
 * Anything not listed here is rejected with -EINVAL.
 */
static struct soc15_allowed_register_entry soc21_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, regGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, regSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, regCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, regGB_ADDR_CONFIG)},
};
278
279 static uint32_t soc21_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
280                                          u32 sh_num, u32 reg_offset)
281 {
282         uint32_t val;
283
284         mutex_lock(&adev->grbm_idx_mutex);
285         if (se_num != 0xffffffff || sh_num != 0xffffffff)
286                 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
287
288         val = RREG32(reg_offset);
289
290         if (se_num != 0xffffffff || sh_num != 0xffffffff)
291                 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
292         mutex_unlock(&adev->grbm_idx_mutex);
293         return val;
294 }
295
296 static uint32_t soc21_get_register_value(struct amdgpu_device *adev,
297                                       bool indexed, u32 se_num,
298                                       u32 sh_num, u32 reg_offset)
299 {
300         if (indexed) {
301                 return soc21_read_indexed_register(adev, se_num, sh_num, reg_offset);
302         } else {
303                 if (reg_offset == SOC15_REG_OFFSET(GC, 0, regGB_ADDR_CONFIG) && adev->gfx.config.gb_addr_config)
304                         return adev->gfx.config.gb_addr_config;
305                 return RREG32(reg_offset);
306         }
307 }
308
309 static int soc21_read_register(struct amdgpu_device *adev, u32 se_num,
310                             u32 sh_num, u32 reg_offset, u32 *value)
311 {
312         uint32_t i;
313         struct soc15_allowed_register_entry  *en;
314
315         *value = 0;
316         for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) {
317                 en = &soc21_allowed_read_registers[i];
318                 if (!adev->reg_offset[en->hwip][en->inst])
319                         continue;
320                 else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
321                                         + en->reg_offset))
322                         continue;
323
324                 *value = soc21_get_register_value(adev,
325                                                soc21_allowed_read_registers[i].grbm_indexed,
326                                                se_num, sh_num, reg_offset);
327                 return 0;
328         }
329         return -EINVAL;
330 }
331
#if 0
/*
 * Driver-sequenced mode1 reset (currently compiled out; soc21_asic_reset()
 * uses amdgpu_device_mode1_reset() instead). Disables bus mastering,
 * caches PCI config state, triggers reset via SMU or PSP, then polls the
 * NBIO memsize register until the ASIC responds again.
 */
static int soc21_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");
	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		/* memsize reads as all-ones while the ASIC is still in reset */
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}
#endif
371
372 static enum amd_reset_method
373 soc21_asic_reset_method(struct amdgpu_device *adev)
374 {
375         if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
376             amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
377             amdgpu_reset_method == AMD_RESET_METHOD_BACO)
378                 return amdgpu_reset_method;
379
380         if (amdgpu_reset_method != -1)
381                 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
382                                   amdgpu_reset_method);
383
384         switch (adev->ip_versions[MP1_HWIP][0]) {
385         case IP_VERSION(13, 0, 0):
386         case IP_VERSION(13, 0, 7):
387         case IP_VERSION(13, 0, 10):
388                 return AMD_RESET_METHOD_MODE1;
389         case IP_VERSION(13, 0, 4):
390         case IP_VERSION(13, 0, 11):
391                 return AMD_RESET_METHOD_MODE2;
392         default:
393                 if (amdgpu_dpm_is_baco_supported(adev))
394                         return AMD_RESET_METHOD_BACO;
395                 else
396                         return AMD_RESET_METHOD_MODE1;
397         }
398 }
399
400 static int soc21_asic_reset(struct amdgpu_device *adev)
401 {
402         int ret = 0;
403
404         switch (soc21_asic_reset_method(adev)) {
405         case AMD_RESET_METHOD_PCI:
406                 dev_info(adev->dev, "PCI reset\n");
407                 ret = amdgpu_device_pci_reset(adev);
408                 break;
409         case AMD_RESET_METHOD_BACO:
410                 dev_info(adev->dev, "BACO reset\n");
411                 ret = amdgpu_dpm_baco_reset(adev);
412                 break;
413         case AMD_RESET_METHOD_MODE2:
414                 dev_info(adev->dev, "MODE2 reset\n");
415                 ret = amdgpu_dpm_mode2_reset(adev);
416                 break;
417         default:
418                 dev_info(adev->dev, "MODE1 reset\n");
419                 ret = amdgpu_device_mode1_reset(adev);
420                 break;
421         }
422
423         return ret;
424 }
425
/* Not implemented for SOC21; UVD clock requests are accepted as no-ops. */
static int soc21_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/* todo */
	return 0;
}
431
/* Not implemented for SOC21; VCE clock requests are accepted as no-ops. */
static int soc21_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */
	return 0;
}
437
438 static void soc21_program_aspm(struct amdgpu_device *adev)
439 {
440         if (!amdgpu_device_should_use_aspm(adev))
441                 return;
442
443         if (!(adev->flags & AMD_IS_APU) &&
444             (adev->nbio.funcs->program_aspm))
445                 adev->nbio.funcs->program_aspm(adev);
446 }
447
/* IP block descriptor registered with the amdgpu IP framework for SOC21. */
const struct amdgpu_ip_block_version soc21_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &soc21_common_ip_funcs,
};
456
457 static bool soc21_need_full_reset(struct amdgpu_device *adev)
458 {
459         switch (adev->ip_versions[GC_HWIP][0]) {
460         case IP_VERSION(11, 0, 0):
461                 return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
462         case IP_VERSION(11, 0, 2):
463         case IP_VERSION(11, 0, 3):
464                 return false;
465         default:
466                 return true;
467         }
468 }
469
470 static bool soc21_need_reset_on_init(struct amdgpu_device *adev)
471 {
472         u32 sol_reg;
473
474         if (adev->flags & AMD_IS_APU)
475                 return false;
476
477         /* Check sOS sign of life register to confirm sys driver and sOS
478          * are already been loaded.
479          */
480         sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
481         if (sol_reg)
482                 return true;
483
484         return false;
485 }
486
/*
 * Dummy backing for the pcie_replay_count sysfs interface; the real
 * counter is not wired up on SOC21 yet, so this always reports 0.
 */
static uint64_t soc21_get_pcie_replay_count(struct amdgpu_device *adev)
{

	/* TODO
	 * dummy implement for pcie_replay_count sysfs interface
	 * */

	return 0;
}
496
/*
 * Assign doorbell slots for every engine. SOC21 reuses the NAVI10
 * doorbell layout constants (KIQ, MEC/GFX rings, user queues, MES,
 * SDMA, IH and VCN).
 */
static void soc21_init_doorbell_index(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
	adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
	adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
	adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
	adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
	adev->doorbell_index.gfx_userqueue_start =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START;
	adev->doorbell_index.gfx_userqueue_end =
		AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END;
	adev->doorbell_index.mes_ring0 = AMDGPU_NAVI10_DOORBELL_MES_RING0;
	adev->doorbell_index.mes_ring1 = AMDGPU_NAVI10_DOORBELL_MES_RING1;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
	adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
	adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
	adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
	adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
	adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
	adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

	/* NOTE(review): the << 1 presumably converts 64-bit doorbell units
	 * to 32-bit slot indices — confirm against amdgpu_doorbell docs. */
	adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
	adev->doorbell_index.sdma_doorbell_range = 20;
}
531
/* No pre-ASIC-init work is needed on SOC21; callback intentionally empty. */
static void soc21_pre_asic_init(struct amdgpu_device *adev)
{
}
535
/*
 * Toggle the UMD stable-pstate profile: enter/exit RLC safe mode and
 * disable/re-enable perfmon MGCG accordingly (perfmon clock gating is
 * turned off while the stable pstate is active). Always returns 0.
 */
static int soc21_update_umd_stable_pstate(struct amdgpu_device *adev,
					  bool enter)
{
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	/* Hook is optional; only present on some GFX implementations. */
	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	return 0;
}
549
/* ASIC-level callback table installed in soc21_common_early_init(). */
static const struct amdgpu_asic_funcs soc21_asic_funcs =
{
	.read_disabled_bios = &soc21_read_disabled_bios,
	.read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom,
	.read_register = &soc21_read_register,
	.reset = &soc21_asic_reset,
	.reset_method = &soc21_asic_reset_method,
	.get_xclk = &soc21_get_xclk,
	.set_uvd_clocks = &soc21_set_uvd_clocks,
	.set_vce_clocks = &soc21_set_vce_clocks,
	.get_config_memsize = &soc21_get_config_memsize,
	.init_doorbell_index = &soc21_init_doorbell_index,
	.need_full_reset = &soc21_need_full_reset,
	.need_reset_on_init = &soc21_need_reset_on_init,
	.get_pcie_replay_count = &soc21_get_pcie_replay_count,
	.supports_baco = &amdgpu_dpm_is_baco_supported,
	.pre_asic_init = &soc21_pre_asic_init,
	.query_video_codecs = &soc21_query_video_codecs,
	.update_umd_stable_pstate = &soc21_update_umd_stable_pstate,
};
570
/*
 * Early init for the SOC21 common IP block: install the indirect
 * register accessors and the ASIC callback table, then derive the
 * clock-gating (cg_flags) and power-gating (pg_flags) feature masks
 * and the external revision id from the GC IP version. Returns -EINVAL
 * for GC versions this file does not know about.
 */
static int soc21_common_early_init(void *handle)
{
/* Offset of the remapped MMIO page within the register BAR. */
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	/* No SMC direct register access on SOC21. */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &amdgpu_device_indirect_rreg;
	adev->pcie_wreg = &amdgpu_device_indirect_wreg;
	adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64;
	adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64;
	adev->pciep_rreg = amdgpu_device_pcie_port_rreg;
	adev->pciep_wreg = amdgpu_device_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &soc21_didt_rreg;
	adev->didt_wreg = &soc21_didt_wreg;

	adev->asic_funcs = &soc21_asic_funcs;

	adev->rev_id = amdgpu_device_get_rev_id(adev);
	/* 0xff marks "unknown" until the switch below assigns a real value. */
	adev->external_rev_id = 0xff;
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
#if 0
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
#endif
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x1; // TODO: need update
		break;
	case IP_VERSION(11, 0, 2):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags =
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x10;
		break;
	case IP_VERSION(11, 0, 1):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags =
			AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case IP_VERSION(11, 0, 3):
		adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_HDP_SD |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x20;
		break;
	case IP_VERSION(11, 0, 4):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x80;
		break;

	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* Under SR-IOV, apply virtualization settings and hook up the
	 * host mailbox interrupt handlers. */
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
723
/* Late init for the SOC21 common block: finish SR-IOV mailbox setup and
 * advertise the correct virtualized video codec capabilities, or (bare
 * metal) arm the NBIO RAS ATHUB error interrupt.  Runs after all IP blocks'
 * hw_init, so BAR resizing (gmc sw_init) has already settled.
 */
static int soc21_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		xgpu_nv_mailbox_get_irq(adev);
		/* Pick the codec tables exposed to the guest: use the vcn1
		 * (no-AV1) variants when VCN0 is harvested or the host does
		 * not grant AV1 to this VF; otherwise the vcn0 variants.
		 */
		if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) ||
		!amdgpu_sriov_is_av1_support(adev)) {
			amdgpu_virt_update_sriov_video_codec(adev,
							     sriov_vcn_4_0_0_video_codecs_encode_array_vcn1,
							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1),
							     sriov_vcn_4_0_0_video_codecs_decode_array_vcn1,
							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1));
		} else {
			amdgpu_virt_update_sriov_video_codec(adev,
							     sriov_vcn_4_0_0_video_codecs_encode_array_vcn0,
							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0),
							     sriov_vcn_4_0_0_video_codecs_decode_array_vcn0,
							     ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0));
		}
	} else {
		if (adev->nbio.ras &&
		    adev->nbio.ras_err_event_athub_irq.funcs)
			/* don't need to fail gpu late init
			 * if enabling athub_err_event interrupt failed
			 * nbio v4_3 only support fatal error handling
			 * just enable the interrupt directly */
			amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	/* Enable selfring doorbell aperture late because doorbell BAR
	 * aperture will change if resize BAR successfully in gmc sw_init.
	 */
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);

	return 0;
}
761
/* Software init for the SOC21 common block: on an SR-IOV virtual function,
 * register the mailbox IRQ id used for host/guest messaging.  Nothing to do
 * on bare metal.
 */
static int soc21_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}
771
/* The common block allocates no software state of its own, so there is
 * nothing to tear down here; always succeed.
 */
static int soc21_common_sw_fini(void *handle)
{
	return 0;
}
776
/* Hardware init for the SOC21 common block.  Order matters here:
 * ASPM programming, then NBIO register setup, then the optional HDP
 * remap, and finally opening the doorbell aperture.
 */
static int soc21_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable aspm */
	soc21_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * for the purpose of expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	adev->nbio.funcs->enable_doorbell_aperture(adev, true);

	return 0;
}
796
/* Hardware teardown for the SOC21 common block: close both doorbell
 * apertures, then release the SR-IOV mailbox IRQ (VF) or the NBIO RAS
 * ATHUB error IRQ taken in late_init (bare metal).  Also reused verbatim
 * by soc21_common_suspend().
 */
static int soc21_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Disable the doorbell aperture and selfring doorbell aperture
	 * separately in hw_fini because soc21_enable_doorbell_aperture
	 * has been removed and there is no need to delay disabling
	 * selfring doorbell.
	 */
	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_nv_mailbox_put_irq(adev);
	} else {
		/* drop the reference taken by amdgpu_irq_get() in late_init */
		if (adev->nbio.ras &&
		    adev->nbio.ras_err_event_athub_irq.funcs)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}
819
/* Suspend tears down exactly what hw_init brought up, so simply delegate
 * to the hw_fini path.
 */
static int soc21_common_suspend(void *handle)
{
	return soc21_common_hw_fini(handle);
}
826
/* Resume re-runs the full hw_init sequence; delegate directly. */
static int soc21_common_resume(void *handle)
{
	return soc21_common_hw_init(handle);
}
833
/* The common block exposes no busy state to poll; always report idle. */
static bool soc21_common_is_idle(void *handle)
{
	return true;
}
838
/* Nothing to wait on (is_idle is unconditionally true); succeed at once. */
static int soc21_common_wait_for_idle(void *handle)
{
	return 0;
}
843
/* No soft-reset support implemented for the common block; report success. */
static int soc21_common_soft_reset(void *handle)
{
	return 0;
}
848
/* Toggle clockgating on the NBIO and HDP blocks according to @state,
 * but only for the NBIO IP versions this SOC21 path knows about; other
 * versions are silently left alone.
 */
static int soc21_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
	case IP_VERSION(7, 7, 0):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}
870
/* Toggle LSDMA memory powergating according to @state for the LSDMA IP
 * versions handled here; other versions are a no-op.
 */
static int soc21_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
		adev->lsdma.funcs->update_memory_power_gating(adev,
				state == AMD_PG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}
888
889 static void soc21_common_get_clockgating_state(void *handle, u64 *flags)
890 {
891         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
892
893         adev->nbio.funcs->get_clockgating_state(adev, flags);
894
895         adev->hdp.funcs->get_clock_gating_state(adev, flags);
896
897         return;
898 }
899
/* amd_ip_funcs vtable wiring the SOC21 "common" IP block callbacks into
 * the amdgpu IP-block lifecycle (init/fini, suspend/resume, idle and
 * clock/power-gating hooks).
 */
static const struct amd_ip_funcs soc21_common_ip_funcs = {
	.name = "soc21_common",
	.early_init = soc21_common_early_init,
	.late_init = soc21_common_late_init,
	.sw_init = soc21_common_sw_init,
	.sw_fini = soc21_common_sw_fini,
	.hw_init = soc21_common_hw_init,
	.hw_fini = soc21_common_hw_fini,
	.suspend = soc21_common_suspend,
	.resume = soc21_common_resume,
	.is_idle = soc21_common_is_idle,
	.wait_for_idle = soc21_common_wait_for_idle,
	.soft_reset = soc21_common_soft_reset,
	.set_clockgating_state = soc21_common_set_clockgating_state,
	.set_powergating_state = soc21_common_set_powergating_state,
	.get_clockgating_state = soc21_common_get_clockgating_state,
};
This page took 0.09187 seconds and 4 git commands to generate.