/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
#include "soc15_hw_ip.h"
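
/* Each union below overlays all supported revisions of one atom data table;
 * callers read frev/crev from the table header and pick the matching member.
 */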

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
	struct atom_firmware_info_v3_2 v32;
	struct atom_firmware_info_v3_3 v33;
	struct atom_firmware_info_v3_4 v34;
};

/*
 * Helper function to query firmware capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return firmware_capability in firmwareinfo table on success or 0 if not
 */
uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	u32 fw_cap = 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1 + */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
		}
	}

	return fw_cap;
}
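
/* The capability word queried above is cached in adev->mode_info.firmware_flags
 * during early device init (outside this file); the ATOM_FIRMWARE_CAP_* helpers
 * below test single bits of that cached copy rather than re-parsing the table.
 */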

/*
 * Helper function to query gpu virtualization capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if gpu virtualization is supported or false if not
 */
bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}

void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
						struct vram_usagebyfirmware_v2_1 *fw_usage,
						int *usage_bytes)
{
	u32 start_addr, fw_size, drv_size;

	start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
	drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);

	DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
		  start_addr,
		  fw_size,
		  drv_size);
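
	/* start_address_in_kb packs ATOM_VRAM_OPERATION flag bits on top of a
	 * KB-granularity offset; masking the flags off and shifting left by 10
	 * converts the remainder to a byte offset.
	 */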
	if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
	    (u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
		  ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
		/* Firmware requests a VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
		/* Use the default scratch size */
		*usage_bytes = 0;
	} else {
		*usage_bytes = drv_size << 10;
	}

	return 0;
}

static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
						struct vram_usagebyfirmware_v2_2 *fw_usage,
						int *usage_bytes)
{
	u32 fw_start_addr, fw_size, drv_start_addr, drv_size;

	fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
	fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);

	drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
	drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);

	DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
		  fw_start_addr,
		  fw_size,
		  drv_start_addr,
		  drv_size);
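
	/* Unlike v2_1, v2_2 reports separate firmware and driver region0
	 * addresses; a region must be reserved when its
	 * ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION bit is clear.
	 */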
	if (amdgpu_sriov_vf(adev) &&
	    ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Firmware requests a VRAM reservation for SR-IOV */
		adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.fw_vram_usage_size = fw_size << 10;
	}

	if (amdgpu_sriov_vf(adev) &&
	    ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
				ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
		/* Driver requests a VRAM reservation for SR-IOV */
		adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
			(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
		adev->mman.drv_vram_usage_size = drv_size << 10;
	}

	*usage_bytes = 0;
	return 0;
}

int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
	struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
	u16 data_offset;
	u8 frev, crev;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
		if (frev == 2 && crev == 1) {
			fw_usage_v2_1 =
				(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_1(adev,
							     fw_usage_v2_1,
							     &usage_bytes);
		} else if (frev >= 2 && crev >= 2) {
			fw_usage_v2_2 =
				(struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
			amdgpu_atomfirmware_allocate_fb_v2_2(adev,
							     fw_usage_v2_2,
							     &usage_bytes);
		}
	}

	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}
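
/* Table overlays used by amdgpu_atomfirmware_get_vram_info() below: APUs
 * describe memory through integratedsysteminfo (igp_info), dGPUs through
 * vram_info, with per-module details overlaid by vram_module.
 */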

union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
	struct atom_integrated_system_info_v1_12 v12;
	struct atom_integrated_system_info_v2_1 v21;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
	struct atom_umc_info_v3_2 v32;
	struct atom_umc_info_v3_3 v33;
	struct atom_umc_info_v4_0 v40;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
	struct atom_vram_info_header_v2_5 v25;
	struct atom_vram_info_header_v2_6 v26;
	struct atom_vram_info_header_v3_0 v30;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
	struct atom_vram_module_v11 v11;
	struct atom_vram_module_v3_0 v30;
};

static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
		case ATOM_DGPU_VRAM_TYPE_HBM2E:
		case ATOM_DGPU_VRAM_TYPE_HBM3:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				      int *vram_width, int *vram_type,
				      int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (frev) {
			case 1:
				switch (crev) {
				case 11:
				case 12:
					mem_channel_number = igp_info->v11.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v11.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			case 2:
				switch (crev) {
				case 1:
				case 2:
					mem_channel_number = igp_info->v21.umachannelnumber;
					if (!mem_channel_number)
						mem_channel_number = 1;
					mem_type = igp_info->v21.memorytype;
					if (mem_type == LpDdr5MemType)
						mem_channel_width = 32;
					else
						mem_channel_width = 64;
					if (vram_width)
						*vram_width = mem_channel_number * mem_channel_width;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					break;
				default:
					return -EINVAL;
				}
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
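			/* bits 23:16 of bios scratch register 4 hold the index
			 * used below to pick the populated vram module entry */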
			if (frev == 3) {
				switch (crev) {
				/* v30 */
				case 0:
					vram_module = (union vram_module *)vram_info->v30.vram_module;
					mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					mem_type = vram_info->v30.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_info->v30.channel_num;
					mem_channel_width = vram_info->v30.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					break;
				default:
					return -EINVAL;
				}
			} else if (frev == 2) {
				switch (crev) {
				/* v23 */
				case 3:
					if (module_id > vram_info->v23.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v23.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v24 */
				case 4:
					if (module_id > vram_info->v24.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v24.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v10.vram_module_size);
						i++;
					}
					mem_type = vram_module->v10.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v10.channel_num;
					mem_channel_width = vram_module->v10.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v25 */
				case 5:
					if (module_id > vram_info->v25.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v25.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v11.vram_module_size);
						i++;
					}
					mem_type = vram_module->v11.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v11.channel_num;
					mem_channel_width = vram_module->v11.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				/* v26 */
				case 6:
					if (module_id > vram_info->v26.vram_module_num)
						module_id = 0;
					vram_module = (union vram_module *)vram_info->v26.vram_module;
					while (i < module_id) {
						vram_module = (union vram_module *)
							((u8 *)vram_module + vram_module->v9.vram_module_size);
						i++;
					}
					mem_type = vram_module->v9.memory_type;
					if (vram_type)
						*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
					mem_channel_number = vram_module->v9.channel_num;
					mem_channel_width = vram_module->v9.channel_width;
					if (vram_width)
						*vram_width = mem_channel_number * (1 << mem_channel_width);
					mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
					if (vram_vendor)
						*vram_vendor = mem_vendor;
					break;
				default:
					return -EINVAL;
				}
			} else {
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * Return true if the vbios has ecc enabled by default and the umc info
 * table is available, or false if ecc is not enabled or the umc info
 * table is not available.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;
	u32 umc_config;
	u32 umc_config1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
				index, &size, &frev, &crev, &data_offset)) {
		umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
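
		/* umc_info v3.x reports ECC through umc_config (plus
		 * umc_config1 from v3.3 on); v4.0 checks only umc_config1.
		 */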
		if (frev == 3) {
			switch (crev) {
			case 1:
				umc_config = le32_to_cpu(umc_info->v31.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 2:
				umc_config = le32_to_cpu(umc_info->v32.umc_config);
				ecc_default_enabled =
					(umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
				break;
			case 3:
				umc_config = le32_to_cpu(umc_info->v33.umc_config);
				umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
				ecc_default_enabled =
					((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
					 (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else if (frev == 4) {
			switch (crev) {
			case 0:
				umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
				ecc_default_enabled =
					(umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
				break;
			default:
				/* unsupported crev */
				return false;
			}
		} else {
			/* unsupported frev */
			return false;
		}
	}

	return ecc_default_enabled;
}

/*
 * Helper function to query sram ecc capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports sram ecc or false if not
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
}

/*
 * Helper function to query dynamic boot config capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if vbios supports dynamic boot config or false if not
 */
bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
}

/**
 * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
 * @adev: amdgpu_device pointer
 * @i2c_address: pointer to u8; if not NULL, will contain
 *    the RAS EEPROM address if the function returns true
 *
 * Return true if VBIOS supports RAS EEPROM address reporting,
 * else return false. If true and @i2c_address is not NULL,
 * will contain the RAS ROM address.
 */
bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
				      u8 *i2c_address)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev,
					  &data_offset)) {
		/* support firmware_info 3.4 + */
		if ((frev == 3 && crev >= 4) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			/* The ras_rom_i2c_slave_addr should ideally
			 * be a 19-bit EEPROM address, which would be
			 * used as is by the driver; see top of
			 * amdgpu_eeprom.c.
			 *
			 * When this is the case, 0 is of course a
			 * valid RAS EEPROM address, in which case,
			 * we'll drop the first "if (firm...)" and only
			 * leave the check for the pointer.
			 *
			 * The reason this works right now is because
			 * ras_rom_i2c_slave_addr contains the EEPROM
			 * device type qualifier 1010b in the top 4
			 * bits.
			 */
			if (firmware_info->v34.ras_rom_i2c_slave_addr) {
				if (i2c_address)
					*i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
				return true;
			}
		}
	}

	return false;
}
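
/* Hypothetical caller sketch: declare "u8 i2c_addr;", call
 * amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr), and use i2c_addr to
 * address the RAS EEPROM only when the call returns true.
 */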

union smu_info {
	struct atom_smu_info_v3_1 v31;
	struct atom_smu_info_v4_0 v40;
};

union gfx_info {
	struct atom_gfx_info_v2_2 v22;
	struct atom_gfx_info_v2_4 v24;
	struct atom_gfx_info_v2_7 v27;
	struct atom_gfx_info_v3_0 v30;
};
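
/* Bootup clocks and PLL limits are spread across three data tables
 * (firmware_info, smu_info, umc_info); the function below gathers them and
 * returns success if at least one table parsed.
 */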
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	/* bootup sclk/mclk come from the firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		if (frev == 3)
			spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
		else if (frev == 4)
			spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		ret = 0;
	}

	/* if asic is Navi+, the rlc reference clock is used for system clock
	 * from vbios gfx_info table */
	if (adev->asic_type >= CHIP_NAVI10) {
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    gfx_info);
		if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
						  &frev, &crev, &data_offset)) {
			union gfx_info *gfx_info = (union gfx_info *)
				(mode_info->atom_context->bios + data_offset);
			if ((frev == 3) ||
			    (frev == 2 && crev == 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
				ret = 0;
			} else if ((frev == 2) &&
				   (crev >= 2) &&
				   (crev != 6)) {
				spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
				ret = 0;
			} else {
				BUG();
			}
		}
	}

	return ret;
}

int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		if (frev == 2) {
			switch (crev) {
			case 4:
				adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth =
					le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf =
					gfx_info->v24.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
				return 0;
			case 7:
				adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
				adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
				adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
				adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
				adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
				adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
				adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
				adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
				adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
				adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
				return 0;
			default:
				return -EINVAL;
			}
		} else if (frev == 3) {
			switch (crev) {
			case 0:
				adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
				adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
				adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
				adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
				adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
				return 0;
			default:
				return -EINVAL;
			}
		} else {
			return -EINVAL;
		}
	}

	return -EINVAL;
}

/*
 * Helper function to query two stage mem training capability
 *
 * @adev: amdgpu_device pointer
 *
 * Return true if two stage mem training is supported or false if not
 */
bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
{
	u32 fw_cap;

	fw_cap = adev->mode_info.firmware_flags;

	return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
}
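
/* firmware_info v3.4+ reports fw_reserved_size_in_kb; the helper below
 * converts it to bytes (<< 10) and reports 0 for older table revisions.
 */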
int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	union firmware_info *firmware_info;
	int index;
	u16 data_offset, size;
	u8 frev, crev;
	int fw_reserved_fb_size;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
			firmwareinfo);

	if (!amdgpu_atom_parse_data_header(ctx, index, &size,
					   &frev, &crev, &data_offset))
		/* fail to parse data_header */
		return 0;

	firmware_info = (union firmware_info *)(ctx->bios + data_offset);

	if (frev != 3)
		return -EINVAL;

	switch (crev) {
	case 4:
		fw_reserved_fb_size =
			(firmware_info->v34.fw_reserved_size_in_kb << 10);
		break;
	default:
		fw_reserved_fb_size = 0;
		break;
	}

	return fw_reserved_fb_size;
}

/*
 * Helper function to execute asic_init table
 *
 * @adev: amdgpu_device pointer
 * @fb_reset: flag to indicate whether fb is reset or not
 *
 * Return 0 on success, a negative error code otherwise
 */
int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx;
	uint8_t frev, crev;
	uint16_t data_offset;
	uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
	struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
	int index;

	if (!mode_info)
		return -EINVAL;

	ctx = mode_info->atom_context;
	if (!ctx)
		return -EINVAL;

	/* query bootup sclk/mclk from firmware_info table */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(ctx, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(ctx->bios +
						data_offset);

		bootup_sclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		bootup_mclk_in10khz =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
	} else {
		return -EINVAL;
	}
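
	/* Stuff the bootup clocks into the asic_init command table's parameter
	 * block and execute it; memflag asks the table to take DRAM out of
	 * self-refresh when the framebuffer contents were preserved
	 * (fb_reset == false).
	 */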
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    asic_init);
	if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
		if (frev == 2 && crev >= 1) {
			memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
			asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
			asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
			asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
			if (!fb_reset)
				asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
			else
				asic_init_ps_v2_1.param.memparam.memflag = 0;
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
					 sizeof(asic_init_ps_v2_1));
}