/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
#include "amdgpu_ras.h"

#include "gfx_v9_4_3.h"
#include "df_v4_6_2.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "nbio_v7_11.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gmc_v12_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "nbif_v6_3_1.h"
#include "navi10_ih.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "gfx_v12_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "sdma_v7_0.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
#include "jpeg_v2_0.h"
#include "jpeg_v3_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "vcn_v4_0_5.h"
#include "jpeg_v4_0_5.h"
#include "amdgpu_vkms.h"
#include "mes_v11_0.h"
#include "mes_v12_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"
#include "smuio_v14_0_2.h"
#include "vcn_v5_0_0.h"
#include "jpeg_v5_0_0.h"

#include "amdgpu_vpe.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

#define mmIP_DISCOVERY_VERSION	0x16A00
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1
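
/*
 * Descriptive note (inferred from how these are used below, not normative):
 * the values above are register offsets, not byte addresses.
 * mmRCC_CONFIG_MEMSIZE reports VRAM size in MB (hence the << 20 when read),
 * mmMP0_SMN_C2PMSG_33 is polled for IFWI-init completion, and the
 * mmMM_INDEX/mmMM_INDEX_HI/mmMM_DATA trio is the usual indirect MMIO
 * access pair plus the high-address extension.
 */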

static const char *hw_id_names[HW_ID_MAX] = {
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[LSDMA_HWID] = "LSDMA",
	[DBGU_IO_HWID] = "DBGU_IO",
	[CLKB_HWID] = "CLKB",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[DDCL_HWID] = "DDCL",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[SATA_HWID] = "SATA",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
};

static int hw_id_map[MAX_HWIP] = {
	[HDP_HWIP] = HDP_HWID,
	[SDMA0_HWIP] = SDMA0_HWID,
	[SDMA1_HWIP] = SDMA1_HWID,
	[SDMA2_HWIP] = SDMA2_HWID,
	[SDMA3_HWIP] = SDMA3_HWID,
	[LSDMA_HWIP] = LSDMA_HWID,
	[MMHUB_HWIP] = MMHUB_HWID,
	[ATHUB_HWIP] = ATHUB_HWID,
	[NBIO_HWIP] = NBIF_HWID,
	[MP0_HWIP] = MP0_HWID,
	[MP1_HWIP] = MP1_HWID,
	[UVD_HWIP] = UVD_HWID,
	[VCE_HWIP] = VCE_HWID,
	[DCE_HWIP] = DMU_HWID,
	[OSSSYS_HWIP] = OSSSYS_HWID,
	[SMUIO_HWIP] = SMUIO_HWID,
	[PWR_HWIP] = PWR_HWID,
	[NBIF_HWIP] = NBIF_HWID,
	[THM_HWIP] = THM_HWID,
	[CLK_HWIP] = CLKA_HWID,
	[UMC_HWIP] = UMC_HWID,
	[XGMI_HWIP] = XGMI_HWID,
	[DCI_HWIP] = DCI_HWID,
	[PCIE_HWIP] = PCIE_HWID,
	[VPE_HWIP] = VPE_HWID,
};

static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
	if (!discv_regn)
		return -ENOENT;

	memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
	memunmap(discv_regn);

	return 0;
}

#define IP_DISCOVERY_V2 2
#define IP_DISCOVERY_V4 4

static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	uint64_t vram_size;
	u32 msg;
	int i, ret = 0;

	if (!amdgpu_sriov_vf(adev)) {
		/* It can take up to a second for IFWI init to complete on some dGPUs,
		 * but generally it should be in the 60-100ms range.  Normally this starts
		 * as soon as the device gets power so by the time the OS loads this has long
		 * completed.  However, when a card is hotplugged via e.g., USB4, we need to
		 * wait for this to complete.  Once the C2PMSG is updated, we can
		 * continue.
		 */
		for (i = 0; i < 1000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			usleep_range(1000, 1100);
		}
	}

	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	if (vram_size) {
		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
					  adev->mman.discovery_tmr_size, false);
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}
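
/*
 * Worked example (illustrative only): for data = { 0x12, 0x34, 0xFF } the
 * running sum is 0x12 + 0x34 + 0xFF = 0x145, truncated to 16 bits, so a
 * table whose header stores 0x0145 verifies.  This is a plain byte sum
 * with wrap-around, not a CRC.
 */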

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;

	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
	    (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
					   struct binary_header *bhdr)
{
	struct table_info *info;
	uint16_t offset;
	uint16_t checksum;

	info = &bhdr->table_list[NPS_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	struct nps_info_header *nhdr =
		(struct nps_info_header *)(adev->mman.discovery_bin + offset);

	if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
		dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
		return -EINVAL;
	}

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      le32_to_cpu(nhdr->size_bytes),
					      checksum)) {
		dev_dbg(adev->dev, "invalid nps info data table checksum\n");
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	/* Read from file if it is the preferred option */
	if (amdgpu_discovery == 2) {
		dev_info(adev->dev, "use ip discovery information from file");
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}
	} else {
		r = amdgpu_discovery_read_binary_from_mem(
			adev, adev->mman.discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
		dev_err(adev->dev,
			"get invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
	if ((amdgpu_discovery != 2) &&
	    (RREG32(mmIP_DISCOVERY_VERSION) == 4))
		amdgpu_ras_query_boot_status(adev, 4);

	return r;
}
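
/*
 * Layout recap (descriptive, inferred from the parsing above): the discovery
 * binary starts with a binary_header whose table_list[] records an offset and
 * checksum per table (IP_DISCOVERY, GC, HARVEST_INFO, VCN_INFO, MALL_INFO,
 * NPS_INFO).  Each table begins with its own header carrying a signature or
 * table_id plus a size, and every table is independently checksummed with the
 * byte-sum scheme above.
 */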

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
{
	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
			  ip->instance_number);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->variant) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->instance_number == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}
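
/*
 * The IP records above are packed back to back, so each walker advances by
 * the flexible-array size of the record it just visited.  A minimal sketch
 * of that stride calculation; next_ip_offset() is a hypothetical helper
 * added here for illustration and is not used by the code in this file:
 */
static inline size_t next_ip_offset(struct ip_v4 *ip, bool base_addr_64_bit)
{
	/* sizeof(*ip) plus num_base_address 64-bit or 32-bit base entries */
	return base_addr_64_bit ?
		struct_size(ip, base_address_64, ip->num_base_address) :
		struct_size(ip, base_address, ip->num_base_address);
}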

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~(umc_harvest_config);
}
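
/*
 * Worked example (illustrative): with node_inst_num = 4 and UMC instance 2
 * harvested, umc_harvest_config = 0b0100 and active_mask becomes
 * 0b1111 & ~0b0100 = 0b1011, i.e. instances 0, 1 and 3 remain active.
 */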

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[] __counted_by(num_base_addresses);
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;  /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */
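
/*
 * The resulting sysfs tree looks like the following (example path, assuming
 * an SDMA0 instance 0 on die 0 of a card whose PCI device directory is
 * reachable via /sys/class/drm/card0/device):
 *
 *   .../ip_discovery/die/0/SDMA0/0/{hw_id,num_instance,major,minor,
 *				     revision,harvest,num_base_addresses,base_addr}
 *
 * where "SDMA0" is a convenience symlink to the numeric hw_id directory,
 * created only when hw_id_names[] has an entry for that ID.
 */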

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 * Each "0x%08X\n" line is 11 characters plus the terminating
		 * NUL, hence the check against 12 below.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured out, get the mask based on hwid */
	switch (hw_id) {
	case VCN_HWID:
		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore.*/
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res)
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */
		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	if (!adev->mman.discovery_bin)
		return -EINVAL;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	if (res)
		goto Err;

	return 0;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}
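
/*
 * Teardown runs bottom-up: every ip_hw_instance kobject is dropped first,
 * then each per-hw_id kset, then each die entry, and finally the die kset
 * and the ip_discovery/ top kobject, whose release callback frees ip_top.
 * The list_lock is dropped around each kobject_put() since tearing down a
 * sysfs entry may sleep.
 */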

/* ================================================== */

static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	uint8_t num_base_address, subrev, variant;
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
				if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
					adev->vpe.num_instances++;
				else
					dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
						adev->vpe.num_instances + 1,
						AMDGPU_MAX_VPE_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store lower 32bit ip base in reg_offset[].
					 * Bits > 32 follow an ASIC specific format, thus just
					 * discard them and handle it within the specific ASIC.
					 * By this way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */

					if (ihdr->version < 3) {
						subrev = 0;
						variant = 0;
					} else {
						subrev = ip->sub_revision;
						variant = ip->variant;
					}

					adev->ip_versions[hw_ip]
							 [ip->instance_number] =
						IP_VERSION_FULL(ip->major,
								ip->minor,
								ip->revision,
								variant,
								subrev);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}
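
/*
 * After this pass, adev->reg_offset[hw_ip][inst] points at the converted
 * base-address array for that block, and register helpers elsewhere in the
 * driver resolve a register roughly as (illustrative, not the literal macro):
 *
 *	addr = adev->reg_offset[GC_HWIP][0][reg_base_idx] + reg_offset;
 *
 * Worked example of the 64-bit truncation above: a discovery entry of
 * 0x00000001C4000000 keeps lower_32_bits() = 0xC4000000, and the
 * & 0x3FFFFFFF mask drops the two non-offset bits, so reg_offset[] sees
 * 0x04000000.
 */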

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 3) &&
	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 4)) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};

static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}

	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}

	return 0;
}
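
/*
 * Worked example for the v1 path (illustrative numbers): with
 * mall_size_per_umc = 2 MiB, num_umc = 4, m_s_present = 0b0001 and
 * m_half_use = 0b0010, the loop sums 2*2 + 2/2 + 2 + 2 = 9 MiB.
 */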

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.vcn_codec_disable_mask[v] =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}

	return 0;
}

union nps_info {
	struct nps_info_v1_0 v1;
};

int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
				  uint32_t *nps_type,
				  struct amdgpu_gmc_memrange **ranges,
				  int *range_cnt)
{
	struct amdgpu_gmc_memrange *mem_ranges;
	struct binary_header *bhdr;
	union nps_info *nps_info;
	u16 offset;
	int i;

	if (!nps_type || !range_cnt || !ranges)
		return -EINVAL;

	if (!adev->mman.discovery_bin) {
		dev_err(adev->dev,
			"fetch mem range failed, ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);

	if (!offset)
		return -ENOENT;

	/* If verification fails, return as if NPS table doesn't exist */
	if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
		return -ENOENT;

	nps_info = (union nps_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(nps_info->v1.header.version_major)) {
	case 1:
		*nps_type = nps_info->v1.nps_type;
		*range_cnt = nps_info->v1.count;
		mem_ranges = kvzalloc(
			*range_cnt * sizeof(struct amdgpu_gmc_memrange),
			GFP_KERNEL);
		if (!mem_ranges)
			return -ENOMEM;
		for (i = 0; i < *range_cnt; i++) {
			mem_ranges[i].base_address =
				nps_info->v1.instance_info[i].base_address;
			mem_ranges[i].limit_address =
				nps_info->v1.instance_info[i].limit_address;
			mem_ranges[i].nid_mask = -1;
			mem_ranges[i].flags = 0;
		}
		*ranges = mem_ranges;
		break;
	default:
		dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
			le16_to_cpu(nps_info->v1.header.version_major),
			le16_to_cpu(nps_info->v1.header.version_minor));
		return -EINVAL;
	}

	return 0;
}
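
/*
 * Note for callers (descriptive assumption): on success *ranges points at a
 * kvzalloc()'d array, so the caller is expected to release it with kvfree()
 * once the memory ranges have been consumed.
 */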

static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}
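
/*
 * All of the amdgpu_discovery_set_*_ip_blocks() helpers below follow the
 * same shape as the function above: switch on the discovered IP version of
 * one HW IP (GC/OSSSYS/MP0/MP1/...), add the matching version-specific
 * ip_block, and fail with -EINVAL for versions the driver does not know.
 */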

static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
		amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(13, 0, 14):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
		case IP_VERSION(3, 5, 0):
		case IP_VERSION(3, 5, 1):
		case IP_VERSION(4, 1, 0):
			/* TODO: Fix IP version. DC code expects version 4.0.1 */
			if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
				adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);

			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCE_HWIP, 0));
			return -EINVAL;
		}
	} else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, DCI_HWIP, 0));
			return -EINVAL;
		}
	}
#endif

	return 0;
}
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		/* GC 12.x is still gated behind the exp_hw_support parameter */
		if (!amdgpu_exp_hw_support)
			return -EINVAL;
		amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
		break;
	default:
		dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, GC_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

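/* Register the SDMA IP block matching the discovered SDMA version. */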
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, SDMA0_HWIP, 0));
		return -EINVAL;
	}
	return 0;
}

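/*
 * Multimedia: ASICs that report a VCE block use the legacy UVD/VCE
 * pair; everything else uses VCN, with the JPEG block registered
 * alongside it where applicable.
 */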
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, VCE_HWIP, 0));
			return -EINVAL;
		}
	} else {
		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		case IP_VERSION(4, 0, 5):
		case IP_VERSION(4, 0, 6):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
			break;
		case IP_VERSION(5, 0, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
			if (amdgpu_jpeg_test)
				adev->enable_jpeg_test = true;
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				amdgpu_ip_version(adev, UVD_HWIP, 0));
			return -EINVAL;
		}
	}
	return 0;
}

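/* MES (micro engine scheduler) is only present on GC 11+ parts. */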
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		if (amdgpu_uni_mes)
			adev->enable_uni_mes = true;
		break;
	default:
		break;
	}
	return 0;
}

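/* SoC-level configuration; currently only GC 9.4.3/9.4.4 need it. */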
static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		aqua_vanjaram_init_soc_config(adev);
		break;
	default:
		break;
	}
}

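/* Register the VPE (video processing engine) block when present. */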
static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 1, 1):
		amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
		break;
	default:
		break;
	}

	return 0;
}

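/*
 * UMSCH (user mode scheduler) for multimedia; gated behind bit 0 of
 * the umsch_mm module parameter.
 */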
static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
	case IP_VERSION(4, 0, 6):
		if (amdgpu_umsch_mm & 0x1) {
			amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
			adev->enable_umsch_mm = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

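/*
 * Main entry point: establish IP versions (hardcoded for ASICs that
 * predate IP discovery, parsed from the discovery table otherwise),
 * derive the family and APU flags, install per-IP callbacks, and
 * register all software IP blocks for this device.
 */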
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

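	/* ASICs that predate IP discovery get hardcoded IP version tables. */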
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);

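	/* Derive the GPU family from the GC (graphics core) version. */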
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
		adev->family = AMDGPU_FAMILY_GC_11_5_0;
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		adev->family = AMDGPU_FAMILY_GC_12_0_0;
		break;
	default:
		return -EINVAL;
	}

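	/* GC versions that identify an APU. */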
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 1):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
	    amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4))
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);

	/* set NBIO version */
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 11, 0):
	case IP_VERSION(7, 11, 1):
		adev->nbio.funcs = &nbio_v7_11_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	case IP_VERSION(6, 3, 1):
		adev->nbio.funcs = &nbif_v6_3_1_funcs;
		adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
		break;
	default:
		break;
	}

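	/* set HDP version */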
	switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(4, 4, 5):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 1, 0):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
		adev->hdp.funcs = &hdp_v7_0_funcs;
		break;
	default:
		break;
	}

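	/* set DF version */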
	switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	case IP_VERSION(4, 6, 2):
		adev->df.funcs = &df_v4_6_2_funcs;
		break;
	default:
		break;
	}

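	/* set SMUIO version */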
	switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
			adev->flags |= AMD_IS_APU;
		}
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	case IP_VERSION(14, 0, 2):
		adev->smuio.funcs = &smuio_v14_0_2_funcs;
		break;
	default:
		break;
	}

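	/* set LSDMA version */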
	switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
		adev->lsdma.funcs = &lsdma_v7_0_funcs;
		break;
	default:
		break;
	}

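	/*
	 * Register the software IP blocks in init order: common and GMC
	 * first, then PSP/IH (PSP before IH under SR-IOV), SMU, display,
	 * GFX, SDMA, multimedia, MES, VPE and UMSCH.
	 */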
	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	    !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_vpe_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);