/* drivers/gpu/drm/amd/amdgpu/nv.c */
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/pci.h>
27
28 #include "amdgpu.h"
29 #include "amdgpu_atombios.h"
30 #include "amdgpu_ih.h"
31 #include "amdgpu_uvd.h"
32 #include "amdgpu_vce.h"
33 #include "amdgpu_ucode.h"
34 #include "amdgpu_psp.h"
35 #include "amdgpu_smu.h"
36 #include "atom.h"
37 #include "amd_pcie.h"
38
39 #include "gc/gc_10_1_0_offset.h"
40 #include "gc/gc_10_1_0_sh_mask.h"
41 #include "mp/mp_11_0_offset.h"
42
43 #include "soc15.h"
44 #include "soc15_common.h"
45 #include "gmc_v10_0.h"
46 #include "gfxhub_v2_0.h"
47 #include "mmhub_v2_0.h"
48 #include "nbio_v2_3.h"
49 #include "nbio_v7_2.h"
50 #include "hdp_v5_0.h"
51 #include "nv.h"
52 #include "navi10_ih.h"
53 #include "gfx_v10_0.h"
54 #include "sdma_v5_0.h"
55 #include "sdma_v5_2.h"
56 #include "vcn_v2_0.h"
57 #include "jpeg_v2_0.h"
58 #include "vcn_v3_0.h"
59 #include "jpeg_v3_0.h"
60 #include "dce_virtual.h"
61 #include "mes_v10_1.h"
62 #include "mxgpu_nv.h"
63 #include "smuio_v11_0.h"
64 #include "smuio_v11_0_6.h"
65
66 static const struct amd_ip_funcs nv_common_ip_funcs;
67
68 /*
69  * Indirect registers accessor
70  */
71 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
72 {
73         unsigned long address, data;
74         address = adev->nbio.funcs->get_pcie_index_offset(adev);
75         data = adev->nbio.funcs->get_pcie_data_offset(adev);
76
77         return amdgpu_device_indirect_rreg(adev, address, data, reg);
78 }
79
80 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
81 {
82         unsigned long address, data;
83
84         address = adev->nbio.funcs->get_pcie_index_offset(adev);
85         data = adev->nbio.funcs->get_pcie_data_offset(adev);
86
87         amdgpu_device_indirect_wreg(adev, address, data, reg, v);
88 }
89
90 static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
91 {
92         unsigned long address, data;
93         address = adev->nbio.funcs->get_pcie_index_offset(adev);
94         data = adev->nbio.funcs->get_pcie_data_offset(adev);
95
96         return amdgpu_device_indirect_rreg64(adev, address, data, reg);
97 }
98
99 static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg)
100 {
101         unsigned long flags, address, data;
102         u32 r;
103         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
104         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
105
106         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
107         WREG32(address, reg * 4);
108         (void)RREG32(address);
109         r = RREG32(data);
110         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
111         return r;
112 }
113
114 static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
115 {
116         unsigned long address, data;
117
118         address = adev->nbio.funcs->get_pcie_index_offset(adev);
119         data = adev->nbio.funcs->get_pcie_data_offset(adev);
120
121         amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
122 }
123
124 static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
125 {
126         unsigned long flags, address, data;
127
128         address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
129         data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
130
131         spin_lock_irqsave(&adev->pcie_idx_lock, flags);
132         WREG32(address, reg * 4);
133         (void)RREG32(address);
134         WREG32(data, v);
135         (void)RREG32(data);
136         spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
137 }
138
139 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
140 {
141         unsigned long flags, address, data;
142         u32 r;
143
144         address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
145         data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
146
147         spin_lock_irqsave(&adev->didt_idx_lock, flags);
148         WREG32(address, (reg));
149         r = RREG32(data);
150         spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
151         return r;
152 }
153
154 static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
155 {
156         unsigned long flags, address, data;
157
158         address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
159         data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);
160
161         spin_lock_irqsave(&adev->didt_idx_lock, flags);
162         WREG32(address, (reg));
163         WREG32(data, (v));
164         spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
165 }
166
/* Report the VRAM size as seen by the nbio block (config memsize register). */
static u32 nv_get_config_memsize(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_memsize(adev);
}
171
/* Return the reference (xclk) frequency from the cached SPLL clock info. */
static u32 nv_get_xclk(struct amdgpu_device *adev)
{
        return adev->clock.spll.reference_freq;
}
176
177
178 void nv_grbm_select(struct amdgpu_device *adev,
179                      u32 me, u32 pipe, u32 queue, u32 vmid)
180 {
181         u32 grbm_gfx_cntl = 0;
182         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
183         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
184         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
185         grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
186
187         WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
188 }
189
/* Enable/disable VGA access - not implemented for Navi yet. */
static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}
194
/* Read the VBIOS while the ASIC is posted/disabled - not implemented yet,
 * so always report failure.
 */
static bool nv_read_disabled_bios(struct amdgpu_device *adev)
{
        /* todo */
        return false;
}
200
201 static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
202                                   u8 *bios, u32 length_bytes)
203 {
204         u32 *dw_ptr;
205         u32 i, length_dw;
206         u32 rom_index_offset, rom_data_offset;
207
208         if (bios == NULL)
209                 return false;
210         if (length_bytes == 0)
211                 return false;
212         /* APU vbios image is part of sbios image */
213         if (adev->flags & AMD_IS_APU)
214                 return false;
215
216         dw_ptr = (u32 *)bios;
217         length_dw = ALIGN(length_bytes, 4) / 4;
218
219         rom_index_offset =
220                 adev->smuio.funcs->get_rom_index_offset(adev);
221         rom_data_offset =
222                 adev->smuio.funcs->get_rom_data_offset(adev);
223
224         /* set rom index to 0 */
225         WREG32(rom_index_offset, 0);
226         /* read out the rom data */
227         for (i = 0; i < length_dw; i++)
228                 dw_ptr[i] = RREG32(rom_data_offset);
229
230         return true;
231 }
232
/* Whitelist of registers userspace may read through the INFO ioctl.
 * NOTE: nv_read_register() identifies the SDMA1 entry by its index (7),
 * so the order of this table must not change without updating that code.
 */
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
        { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
        { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
        { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
        { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
        { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
254
255 static uint32_t nv_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
256                                          u32 sh_num, u32 reg_offset)
257 {
258         uint32_t val;
259
260         mutex_lock(&adev->grbm_idx_mutex);
261         if (se_num != 0xffffffff || sh_num != 0xffffffff)
262                 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
263
264         val = RREG32(reg_offset);
265
266         if (se_num != 0xffffffff || sh_num != 0xffffffff)
267                 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
268         mutex_unlock(&adev->grbm_idx_mutex);
269         return val;
270 }
271
272 static uint32_t nv_get_register_value(struct amdgpu_device *adev,
273                                       bool indexed, u32 se_num,
274                                       u32 sh_num, u32 reg_offset)
275 {
276         if (indexed) {
277                 return nv_read_indexed_register(adev, se_num, sh_num, reg_offset);
278         } else {
279                 if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
280                         return adev->gfx.config.gb_addr_config;
281                 return RREG32(reg_offset);
282         }
283 }
284
/*
 * nv_read_register - userspace register read (INFO ioctl backend)
 *
 * Looks @reg_offset up in the nv_allowed_read_registers whitelist and,
 * on a match, returns its value through @value.
 *
 * Returns 0 on success, -EINVAL if the register is not whitelisted.
 */
static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        uint32_t i;
        struct soc15_allowed_register_entry  *en;

        *value = 0;
        for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) {
                en = &nv_allowed_read_registers[i];
                /* index 7 is the SDMA1 entry in the whitelist table */
                if ((i == 7 && (adev->sdma.num_instances == 1)) || /* some asics don't have SDMA1 */
                    reg_offset !=
                    (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset))
                        continue;

                *value = nv_get_register_value(adev,
                                               nv_allowed_read_registers[i].grbm_indexed,
                                               se_num, sh_num, reg_offset);
                return 0;
        }
        return -EINVAL;
}
306
/*
 * nv_asic_mode1_reset - full ASIC (mode1) reset
 *
 * Performs the reset through the SMU when it supports mode1 reset,
 * falling back to the PSP otherwise.  PCI config space is cached before
 * the reset and restored afterwards, and the function polls the nbio
 * memsize register until the ASIC responds again.
 *
 * Returns 0 on success or a negative error code from the reset backend.
 */
static int nv_asic_mode1_reset(struct amdgpu_device *adev)
{
        u32 i;
        int ret = 0;

        /* flag the VBIOS scratch regs so other components know a reset is in flight */
        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        /* disable BM */
        pci_clear_master(adev->pdev);

        /* config space is lost across the reset; save it for restore below */
        amdgpu_device_cache_pci_state(adev->pdev);

        if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
                dev_info(adev->dev, "GPU smu mode1 reset\n");
                ret = amdgpu_dpm_mode1_reset(adev);
        } else {
                dev_info(adev->dev, "GPU psp mode1 reset\n");
                ret = psp_gpu_reset(adev);
        }

        if (ret)
                dev_err(adev->dev, "GPU mode1 reset failed\n");
        amdgpu_device_load_pci_state(adev->pdev);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = adev->nbio.funcs->get_memsize(adev);

                /* memsize reads back as all-ones while the ASIC is still in reset */
                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return ret;
}
344
/*
 * nv_asic_mode2_reset - lightweight (mode2) reset via the SMU/DPM path
 *
 * Same save/restore and memsize-poll choreography as mode1 reset, but
 * the reset itself is always driven through amdgpu_dpm_mode2_reset().
 *
 * Returns 0 on success or a negative error code from the DPM backend.
 */
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
        u32 i;
        int ret = 0;

        /* flag the VBIOS scratch regs so other components know a reset is in flight */
        amdgpu_atombios_scratch_regs_engine_hung(adev, true);

        /* disable BM */
        pci_clear_master(adev->pdev);

        /* config space is lost across the reset; save it for restore below */
        amdgpu_device_cache_pci_state(adev->pdev);

        ret = amdgpu_dpm_mode2_reset(adev);
        if (ret)
                dev_err(adev->dev, "GPU mode2 reset failed\n");

        amdgpu_device_load_pci_state(adev->pdev);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                u32 memsize = adev->nbio.funcs->get_memsize(adev);

                /* memsize reads back as all-ones while the ASIC is still in reset */
                if (memsize != 0xffffffff)
                        break;
                udelay(1);
        }

        amdgpu_atombios_scratch_regs_engine_hung(adev, false);

        return ret;
}
376
377 static bool nv_asic_supports_baco(struct amdgpu_device *adev)
378 {
379         struct smu_context *smu = &adev->smu;
380
381         if (smu_baco_is_support(smu))
382                 return true;
383         else
384                 return false;
385 }
386
/*
 * nv_asic_reset_method - choose which reset flavor to use for this ASIC
 *
 * A valid user override (amdgpu_reset_method module parameter) always
 * wins; otherwise the choice is per-chip: mode2 for Vangogh, mode1 for
 * Sienna Cichlid/Navy Flounder/Dimgrey Cavefish, and BACO (when the SMU
 * supports it) with a mode1 fallback for everything else.
 */
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
            amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
            amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
            amdgpu_reset_method == AMD_RESET_METHOD_PCI)
                return amdgpu_reset_method;

        /* -1 means "auto"; anything else unrecognized gets a warning */
        if (amdgpu_reset_method != -1)
                dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
                                  amdgpu_reset_method);

        switch (adev->asic_type) {
        case CHIP_VANGOGH:
                return AMD_RESET_METHOD_MODE2;
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
                return AMD_RESET_METHOD_MODE1;
        default:
                if (smu_baco_is_support(smu))
                        return AMD_RESET_METHOD_BACO;
                else
                        return AMD_RESET_METHOD_MODE1;
        }
}
416
/*
 * nv_asic_reset - reset the ASIC using the method picked by
 * nv_asic_reset_method().
 *
 * BACO reset is an enter/exit pair through the SMU; PCI, mode1 and
 * mode2 are delegated to their respective helpers.  Vangogh is skipped
 * entirely for now.
 *
 * Returns 0 on success or a negative error code.
 */
static int nv_asic_reset(struct amdgpu_device *adev)
{
        int ret = 0;
        struct smu_context *smu = &adev->smu;

        /* skip reset on vangogh for now */
        if (adev->asic_type == CHIP_VANGOGH)
                return 0;

        switch (nv_asic_reset_method(adev)) {
        case AMD_RESET_METHOD_PCI:
                dev_info(adev->dev, "PCI reset\n");
                ret = amdgpu_device_pci_reset(adev);
                break;
        case AMD_RESET_METHOD_BACO:
                dev_info(adev->dev, "BACO reset\n");

                ret = smu_baco_enter(smu);
                if (ret)
                        return ret;
                ret = smu_baco_exit(smu);
                if (ret)
                        return ret;
                break;
        case AMD_RESET_METHOD_MODE2:
                dev_info(adev->dev, "MODE2 reset\n");
                ret = nv_asic_mode2_reset(adev);
                break;
        default:
                dev_info(adev->dev, "MODE1 reset\n");
                ret = nv_asic_mode1_reset(adev);
                break;
        }

        return ret;
}
453
/* Set UVD vclk/dclk - not implemented for Navi; reports success. */
static int nv_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        /* todo */
        return 0;
}
459
/* Set VCE evclk/ecclk - not implemented for Navi; reports success. */
static int nv_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */
        return 0;
}
465
466 static void nv_pcie_gen3_enable(struct amdgpu_device *adev)
467 {
468         if (pci_is_root_bus(adev->pdev->bus))
469                 return;
470
471         if (amdgpu_pcie_gen2 == 0)
472                 return;
473
474         if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
475                                         CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
476                 return;
477
478         /* todo */
479 }
480
481 static void nv_program_aspm(struct amdgpu_device *adev)
482 {
483         if (amdgpu_aspm != 1)
484                 return;
485
486         if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
487             !(adev->flags & AMD_IS_APU) &&
488             (adev->nbio.funcs->program_aspm))
489                 adev->nbio.funcs->program_aspm(adev);
490
491 }
492
/* Toggle both the main and the self-ring doorbell apertures in nbio. */
static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
        adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
499
/* IP block descriptor for the Navi "common" block (registered first in
 * nv_set_ip_blocks(); funcs are defined at the bottom of this file). */
static const struct amdgpu_ip_block_version nv_common_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_COMMON,
        .major = 1,
        .minor = 0,
        .rev = 0,
        .funcs = &nv_common_ip_funcs,
};
508
509 static int nv_reg_base_init(struct amdgpu_device *adev)
510 {
511         int r;
512
513         if (amdgpu_discovery) {
514                 r = amdgpu_discovery_reg_base_init(adev);
515                 if (r) {
516                         DRM_WARN("failed to init reg base from ip discovery table, "
517                                         "fallback to legacy init method\n");
518                         goto legacy_init;
519                 }
520
521                 return 0;
522         }
523
524 legacy_init:
525         switch (adev->asic_type) {
526         case CHIP_NAVI10:
527                 navi10_reg_base_init(adev);
528                 break;
529         case CHIP_NAVI14:
530                 navi14_reg_base_init(adev);
531                 break;
532         case CHIP_NAVI12:
533                 navi12_reg_base_init(adev);
534                 break;
535         case CHIP_SIENNA_CICHLID:
536         case CHIP_NAVY_FLOUNDER:
537                 sienna_cichlid_reg_base_init(adev);
538                 break;
539         case CHIP_VANGOGH:
540                 vangogh_reg_base_init(adev);
541                 break;
542         case CHIP_DIMGREY_CAVEFISH:
543                 dimgrey_cavefish_reg_base_init(adev);
544                 break;
545         default:
546                 return -EINVAL;
547         }
548
549         return 0;
550 }
551
/* Install the Navi SR-IOV virtualization ops (called for VF devices). */
void nv_set_virt_ops(struct amdgpu_device *adev)
{
        adev->virt.ops = &xgpu_nv_virt_ops;
}
556
557 static bool nv_is_headless_sku(struct pci_dev *pdev)
558 {
559         if ((pdev->device == 0x731E &&
560             (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
561             (pdev->device == 0x7340 && pdev->revision == 0xC9))
562                 return true;
563         return false;
564 }
565
/*
 * nv_set_ip_blocks - select callback tables and register IP blocks
 *
 * Picks the nbio/hdp/smuio function tables for this ASIC, initializes
 * the register base offsets (must happen before any HW register access),
 * then registers the IP blocks for the chip.  The order blocks are added
 * is the order they will be initialized in, so it is significant:
 * common, GMC, IH, PSP/SMU, display, GFX, SDMA, then multimedia.
 *
 * Returns 0 on success, -EINVAL for an unknown ASIC type, or the error
 * from nv_reg_base_init().
 */
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
        int r;

        /* APUs (Vangogh) use nbio v7.2; dGPUs use nbio v2.3 */
        if (adev->flags & AMD_IS_APU) {
                adev->nbio.funcs = &nbio_v7_2_funcs;
                adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
        } else {
                adev->nbio.funcs = &nbio_v2_3_funcs;
                adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
        }
        adev->hdp.funcs = &hdp_v5_0_funcs;

        /* Sienna Cichlid and newer use the 11.0.6 smuio block */
        if (adev->asic_type >= CHIP_SIENNA_CICHLID)
                adev->smuio.funcs = &smuio_v11_0_6_funcs;
        else
                adev->smuio.funcs = &smuio_v11_0_funcs;

        if (adev->asic_type == CHIP_SIENNA_CICHLID)
                adev->gmc.xgmi.supported = true;

        /* Set IP register base before any HW register access */
        r = nv_reg_base_init(adev);
        if (r)
                return r;

        switch (adev->asic_type) {
        case CHIP_NAVI10:
        case CHIP_NAVI14:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                /* with PSP fw loading the SMU comes up early (bare metal only) */
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
                /* with direct fw loading the SMU is brought up after SDMA instead */
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                /* headless SKUs have no VCN */
                if (!nv_is_headless_sku(adev->pdev))
                        amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
                if (adev->enable_mes)
                        amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
                break;
        case CHIP_NAVI12:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                /* JPEG is host-managed under SR-IOV */
                if (!amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
                break;
        case CHIP_SIENNA_CICHLID:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                        amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                if (!amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

                if (adev->enable_mes)
                        amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
                break;
        case CHIP_NAVY_FLOUNDER:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                        amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                break;
        case CHIP_VANGOGH:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                        amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
                break;
        case CHIP_DIMGREY_CAVEFISH:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
                if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
                        amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
                amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
732
/* Return the ASIC revision id as reported by the nbio block. */
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
        return adev->nbio.funcs->get_rev_id(adev);
}
737
/* Navi always requires a full ASIC reset (no soft per-IP reset path). */
static bool nv_need_full_reset(struct amdgpu_device *adev)
{
        return true;
}
742
743 static bool nv_need_reset_on_init(struct amdgpu_device *adev)
744 {
745         u32 sol_reg;
746
747         if (adev->flags & AMD_IS_APU)
748                 return false;
749
750         /* Check sOS sign of life register to confirm sys driver and sOS
751          * are already been loaded.
752          */
753         sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
754         if (sol_reg)
755                 return true;
756
757         return false;
758 }
759
/* PCIe replay counter for sysfs - not implemented on Navi yet, so the
 * pcie_replay_count attribute always reads back 0.
 */
static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev)
{

        /* TODO
         * dummy implement for pcie_replay_count sysfs interface
         * */

        return 0;
}
769
/*
 * nv_init_doorbell_index - assign the Navi doorbell layout
 *
 * Fills adev->doorbell_index with the AMDGPU_NAVI10_DOORBELL_* slot
 * assignments for KIQ, MEC/GFX rings, MES, the SDMA engines, IH and the
 * VCN ring pairs, plus the first/last non-CP markers used for doorbell
 * range programming.
 */
static void nv_init_doorbell_index(struct amdgpu_device *adev)
{
        adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ;
        adev->doorbell_index.mec_ring0 = AMDGPU_NAVI10_DOORBELL_MEC_RING0;
        adev->doorbell_index.mec_ring1 = AMDGPU_NAVI10_DOORBELL_MEC_RING1;
        adev->doorbell_index.mec_ring2 = AMDGPU_NAVI10_DOORBELL_MEC_RING2;
        adev->doorbell_index.mec_ring3 = AMDGPU_NAVI10_DOORBELL_MEC_RING3;
        adev->doorbell_index.mec_ring4 = AMDGPU_NAVI10_DOORBELL_MEC_RING4;
        adev->doorbell_index.mec_ring5 = AMDGPU_NAVI10_DOORBELL_MEC_RING5;
        adev->doorbell_index.mec_ring6 = AMDGPU_NAVI10_DOORBELL_MEC_RING6;
        adev->doorbell_index.mec_ring7 = AMDGPU_NAVI10_DOORBELL_MEC_RING7;
        adev->doorbell_index.userqueue_start = AMDGPU_NAVI10_DOORBELL_USERQUEUE_START;
        adev->doorbell_index.userqueue_end = AMDGPU_NAVI10_DOORBELL_USERQUEUE_END;
        adev->doorbell_index.gfx_ring0 = AMDGPU_NAVI10_DOORBELL_GFX_RING0;
        adev->doorbell_index.gfx_ring1 = AMDGPU_NAVI10_DOORBELL_GFX_RING1;
        adev->doorbell_index.mes_ring = AMDGPU_NAVI10_DOORBELL_MES_RING;
        adev->doorbell_index.sdma_engine[0] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0;
        adev->doorbell_index.sdma_engine[1] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1;
        adev->doorbell_index.sdma_engine[2] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2;
        adev->doorbell_index.sdma_engine[3] = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3;
        adev->doorbell_index.ih = AMDGPU_NAVI10_DOORBELL_IH;
        adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_NAVI10_DOORBELL64_VCN0_1;
        adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_NAVI10_DOORBELL64_VCN2_3;
        adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_NAVI10_DOORBELL64_VCN4_5;
        adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_NAVI10_DOORBELL64_VCN6_7;
        adev->doorbell_index.first_non_cp = AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP;
        adev->doorbell_index.last_non_cp = AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP;

        /* << 1: doorbell indices are in 32-bit units, assignments in 64-bit */
        adev->doorbell_index.max_assignment = AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT << 1;
        adev->doorbell_index.sdma_doorbell_range = 20;
}
801
/* No chip-specific work is required on NV before common ASIC init. */
static void nv_pre_asic_init(struct amdgpu_device *adev)
{
}
805
/* Enter/exit the UMD "stable pstate" profiling mode.
 * @enter: true when a user-mode driver requests stable clocks,
 *         false when it releases them.
 *
 * Returns 0 (the callbacks used here report no failures).
 */
static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
				       bool enter)
{
	/* Hold RLC safe mode for as long as stable pstate is active */
	if (enter)
		amdgpu_gfx_rlc_enter_safe_mode(adev);
	else
		amdgpu_gfx_rlc_exit_safe_mode(adev);

	/* Perfmon MGCG is turned off while profiling (!enter == false
	 * on entry), and re-enabled on exit.
	 */
	if (adev->gfx.funcs->update_perfmon_mgcg)
		adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);

	/*
	 * The ASPM function is not fully enabled and verified on
	 * Navi yet. Temporarily skip this until ASPM enabled.
	 * Hence only discrete chips from Sienna Cichlid onward toggle
	 * ASPM here.
	 */
	if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
	    !(adev->flags & AMD_IS_APU) &&
	    (adev->nbio.funcs->enable_aspm))
		adev->nbio.funcs->enable_aspm(adev, !enter);

	return 0;
}
828
/* ASIC-level callback table registered for all NV-family chips;
 * installed into adev->asic_funcs by nv_common_early_init().
 */
static const struct amdgpu_asic_funcs nv_asic_funcs =
{
	.read_disabled_bios = &nv_read_disabled_bios,
	.read_bios_from_rom = &nv_read_bios_from_rom,
	.read_register = &nv_read_register,
	.reset = &nv_asic_reset,
	.reset_method = &nv_asic_reset_method,
	.set_vga_state = &nv_vga_set_state,
	.get_xclk = &nv_get_xclk,
	.set_uvd_clocks = &nv_set_uvd_clocks,
	.set_vce_clocks = &nv_set_vce_clocks,
	.get_config_memsize = &nv_get_config_memsize,
	.init_doorbell_index = &nv_init_doorbell_index,
	.need_full_reset = &nv_need_full_reset,
	.need_reset_on_init = &nv_need_reset_on_init,
	.get_pcie_replay_count = &nv_get_pcie_replay_count,
	.supports_baco = &nv_asic_supports_baco,
	.pre_asic_init = &nv_pre_asic_init,
	.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
};
849
/* Early init for the NV "common" IP block: wires up the indirect
 * register access helpers, installs the ASIC callback table, and
 * programs the per-chip clockgating (cg_flags) / powergating
 * (pg_flags) capability masks and external revision id.
 *
 * Returns 0 on success, -EINVAL for an unrecognized asic_type.
 */
static int nv_common_early_init(void *handle)
{
/* MMIO hole used to remap HDP registers for user-space access */
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	/* NV has no SMC register pair */
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &nv_pcie_rreg;
	adev->pcie_wreg = &nv_pcie_wreg;
	adev->pcie_rreg64 = &nv_pcie_rreg64;
	adev->pcie_wreg64 = &nv_pcie_wreg64;
	adev->pciep_rreg = &nv_pcie_port_rreg;
	adev->pciep_wreg = &nv_pcie_port_wreg;

	/* TODO: will add them during VCN v2 implementation */
	adev->uvd_ctx_rreg = NULL;
	adev->uvd_ctx_wreg = NULL;

	adev->didt_rreg = &nv_didt_rreg;
	adev->didt_wreg = &nv_didt_wreg;

	adev->asic_funcs = &nv_asic_funcs;

	adev->rev_id = nv_get_rev_id(adev);
	/* 0xff = "unknown" until a chip case below overrides it */
	adev->external_rev_id = 0xff;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_NAVI14:
		/* Same CG set as Navi10; PG set omits ATHUB */
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 20;
		break;
	case CHIP_NAVI12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB;
		/* guest vm gets 0xffffffff when reading RCC_DEV0_EPF0_STRAP0,
		 * as a consequence, the rev_id and external_rev_id are wrong.
		 * workaround it by hardcoding rev_id to 0 (default value).
		 */
		if (amdgpu_sriov_vf(adev))
			adev->rev_id = 0;
		adev->external_rev_id = adev->rev_id + 0xa;
		break;
	case CHIP_SIENNA_CICHLID:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		if (amdgpu_sriov_vf(adev)) {
			/* hypervisor control CG and PG enablement */
			adev->cg_flags = 0;
			adev->pg_flags = 0;
		}
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_NAVY_FLOUNDER:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;

	case CHIP_VANGOGH:
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG;
		/* NOTE(review): AMD_APU_IS_VANGOGH was just OR'ed into
		 * apu_flags above, so this condition is always true here;
		 * presumably kept for symmetry with other APU code paths.
		 */
		if (adev->apu_flags & AMD_APU_IS_VANGOGH)
			adev->external_rev_id = adev->rev_id + 0x01;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_IH_CG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_ATHUB |
			AMD_PG_SUPPORT_MMHUB;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	/* Virtual function: apply SR-IOV settings and hook mailbox IRQs */
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_nv_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
1044
/* Late init: once interrupt handling is up, a virtual function can
 * request its host mailbox interrupt. Bare-metal has nothing to do.
 */
static int nv_common_late_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_get_irq(adev);

	return 0;
}
1054
/* SW init: register the SR-IOV mailbox interrupt source id when
 * running as a virtual function; otherwise nothing to set up.
 */
static int nv_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_nv_mailbox_add_irq_id(adev);

	return 0;
}
1064
/* SW fini: the common block allocates no software state, so there is
 * nothing to tear down.
 */
static int nv_common_sw_fini(void *handle)
{
	return 0;
}
1069
/* HW init for the common block. Order matters: PCIe link/ASPM setup
 * first, then NBIO register init, then the HDP remap and doorbell
 * aperture enable. Always returns 0.
 */
static int nv_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	nv_pcie_gen3_enable(adev);
	/* enable aspm */
	nv_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers
	 * to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);
	/* enable the doorbell aperture */
	nv_enable_doorbell_aperture(adev, true);

	return 0;
}
1091
1092 static int nv_common_hw_fini(void *handle)
1093 {
1094         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1095
1096         /* disable the doorbell aperture */
1097         nv_enable_doorbell_aperture(adev, false);
1098
1099         return 0;
1100 }
1101
/* Suspend of the common block is simply a hw_fini. */
static int nv_common_suspend(void *handle)
{
	return nv_common_hw_fini(handle);
}
1108
/* Resume of the common block is simply a hw_init. */
static int nv_common_resume(void *handle)
{
	return nv_common_hw_init(handle);
}
1115
/* The common block has no busy state to poll; report always idle. */
static bool nv_common_is_idle(void *handle)
{
	return true;
}
1120
/* Nothing to wait on for the common block; succeed immediately. */
static int nv_common_wait_for_idle(void *handle)
{
	return 0;
}
1125
/* Soft reset is not implemented for the common block; report success. */
static int nv_common_soft_reset(void *handle)
{
	return 0;
}
1130
1131 static int nv_common_set_clockgating_state(void *handle,
1132                                            enum amd_clockgating_state state)
1133 {
1134         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1135
1136         if (amdgpu_sriov_vf(adev))
1137                 return 0;
1138
1139         switch (adev->asic_type) {
1140         case CHIP_NAVI10:
1141         case CHIP_NAVI14:
1142         case CHIP_NAVI12:
1143         case CHIP_SIENNA_CICHLID:
1144         case CHIP_NAVY_FLOUNDER:
1145         case CHIP_DIMGREY_CAVEFISH:
1146                 adev->nbio.funcs->update_medium_grain_clock_gating(adev,
1147                                 state == AMD_CG_STATE_GATE);
1148                 adev->nbio.funcs->update_medium_grain_light_sleep(adev,
1149                                 state == AMD_CG_STATE_GATE);
1150                 adev->hdp.funcs->update_clock_gating(adev,
1151                                 state == AMD_CG_STATE_GATE);
1152                 adev->smuio.funcs->update_rom_clock_gating(adev,
1153                                 state == AMD_CG_STATE_GATE);
1154                 break;
1155         default:
1156                 break;
1157         }
1158         return 0;
1159 }
1160
/* Powergating control for the common block is not implemented yet;
 * accept the request and report success.
 */
static int nv_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	/* TODO */
	return 0;
}
1167
1168 static void nv_common_get_clockgating_state(void *handle, u32 *flags)
1169 {
1170         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1171
1172         if (amdgpu_sriov_vf(adev))
1173                 *flags = 0;
1174
1175         adev->nbio.funcs->get_clockgating_state(adev, flags);
1176
1177         adev->hdp.funcs->get_clock_gating_state(adev, flags);
1178
1179         adev->smuio.funcs->get_clock_gating_state(adev, flags);
1180
1181         return;
1182 }
1183
/* IP-block callback table for the NV "common" block, dispatched by the
 * amdgpu IP framework during device init/fini, suspend/resume and
 * power-management transitions.
 */
static const struct amd_ip_funcs nv_common_ip_funcs = {
	.name = "nv_common",
	.early_init = nv_common_early_init,
	.late_init = nv_common_late_init,
	.sw_init = nv_common_sw_init,
	.sw_fini = nv_common_sw_fini,
	.hw_init = nv_common_hw_init,
	.hw_fini = nv_common_hw_fini,
	.suspend = nv_common_suspend,
	.resume = nv_common_resume,
	.is_idle = nv_common_is_idle,
	.wait_for_idle = nv_common_wait_for_idle,
	.soft_reset = nv_common_soft_reset,
	.set_clockgating_state = nv_common_set_clockgating_state,
	.set_powergating_state = nv_common_set_powergating_state,
	.get_clockgating_state = nv_common_get_clockgating_state,
};
This page took 0.111535 seconds and 4 git commands to generate.