/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "soc15_hw_ip.h"
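
/* Check the firmwareinfo table for the GPU virtualization capability flag. */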
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
			return true;
	}
	return false;
}
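
/* Cache the BIOS scratch register base address for later scratch reg access. */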
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}
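
/*
 * Parse the vram_usagebyfirmware table: record any SR-IOV VRAM reservation
 * requested by the firmware and allocate the atom interpreter scratch buffer.
 */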
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
		    (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware requests a VRAM reservation for SR-IOV */
			adev->fw_vram_usage.start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->fw_vram_usage.size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}
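
/* vbios data table layouts, one union member per supported table revision */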
union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
	struct atom_vram_info_header_v2_4 v24;
};

union vram_module {
	struct atom_vram_module_v9 v9;
	struct atom_vram_module_v10 v10;
};
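
/* Translate an atomfirmware memory type into the driver's AMDGPU_VRAM_TYPE_* enum. */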
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		case ATOM_DGPU_VRAM_TYPE_GDDR6:
			vram_type = AMDGPU_VRAM_TYPE_GDDR6;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}
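
/*
 * Query VRAM width, type and vendor: APUs use the integratedsysteminfo table,
 * dGPUs use the vram_info module list indexed by the module id latched in the
 * BIOS scratch registers.
 */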
int
amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
				  int *vram_width, int *vram_type,
				  int *vram_vendor)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index, i = 0;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	union vram_module *vram_module;
	u8 frev, crev;
	u8 mem_type;
	u8 mem_vendor;
	u32 mem_channel_number;
	u32 mem_channel_width;
	u32 module_id;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			switch (crev) {
			case 11:
				mem_channel_number = igp_info->v11.umachannelnumber;
				/* channel width is 64 */
				if (vram_width)
					*vram_width = mem_channel_number * 64;
				mem_type = igp_info->v11.memorytype;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				break;
			default:
				return -EINVAL;
			}
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
			switch (crev) {
			case 3:
				if (module_id > vram_info->v23.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v23.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v9.vram_module_size);
					i++;
				}
				mem_type = vram_module->v9.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v9.channel_num;
				mem_channel_width = vram_module->v9.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			case 4:
				if (module_id > vram_info->v24.vram_module_num)
					module_id = 0;
				vram_module = (union vram_module *)vram_info->v24.vram_module;
				while (i < module_id) {
					vram_module = (union vram_module *)
						((u8 *)vram_module + vram_module->v10.vram_module_size);
					i++;
				}
				mem_type = vram_module->v10.memory_type;
				if (vram_type)
					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
				mem_channel_number = vram_module->v10.channel_num;
				mem_channel_width = vram_module->v10.channel_width;
				if (vram_width)
					*vram_width = mem_channel_number * (1 << mem_channel_width);
				mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
				if (vram_vendor)
					*vram_vendor = mem_vendor;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	return 0;
}
/*
 * Return true if the vbios enabled ECC by default (requires the umc_info
 * table), or false if ECC is not enabled or the umc_info table is missing.
 */
bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union umc_info *umc_info;
	u8 frev, crev;
	bool ecc_default_enabled = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);

	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support umc_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			umc_info = (union umc_info *)
				(mode_info->atom_context->bios + data_offset);
			ecc_default_enabled =
				(le32_to_cpu(umc_info->v31.umc_config) &
				 UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
		}
	}

	return ecc_default_enabled;
}
union firmware_info {
	struct atom_firmware_info_v3_1 v31;
};
/*
 * Return true if the vbios supports SRAM ECC, false if not.
 */
bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union firmware_info *firmware_info;
	u8 frev, crev;
	bool sram_ecc_supported = false;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
					  index, &size, &frev, &crev, &data_offset)) {
		/* support firmware_info 3.1+ */
		if ((frev == 3 && crev >= 1) || (frev > 3)) {
			firmware_info = (union firmware_info *)
				(mode_info->atom_context->bios + data_offset);
			sram_ecc_supported =
				(le32_to_cpu(firmware_info->v31.firmware_capability) &
				 ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
		}
	}

	return sram_ecc_supported;
}
union smu_info {
	struct atom_smu_info_v3_1 v31;
};
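
/*
 * Read the bootup sclk/mclk and the spll/mpll reference clocks from the
 * firmwareinfo, smu_info and umc_info tables; returns -EINVAL if none of
 * the tables could be parsed.
 */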
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le32_to_cpu(firmware_info->v31.firmware_capability);

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;

		ret = 0;
	}

	return ret;
}
union gfx_info {
	struct atom_gfx_info_v2_4 v24;
};
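
/* Fill adev->gfx.config and adev->gfx.cu_info from the gfx_info v2.4 table. */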
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 4:
			adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
			adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
			adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
			adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
			adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
			adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
			adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
			adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
			adev->gfx.config.gs_prim_buffer_depth =
				le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
			adev->gfx.config.double_offchip_lds_buf =
				gfx_info->v24.gc_double_offchip_lds_buffer;
			adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
			adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
			adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
			adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
			return 0;
		default:
			return -EINVAL;
		}
	}

	return -EINVAL;
}
/*
 * Check if the VBIOS supports GDDR6 training data save/restore.
 */
static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev)
{
	uint16_t data_offset;
	int index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		DRM_DEBUG("atom firmware capability:0x%08x.\n",
			  le32_to_cpu(firmware_info->firmware_capability));

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING)
			return true;
	}

	return false;
}
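
/*
 * Returns 1 when both the vbios and the MP0 (PSP) hardware support memory
 * training, 0 when the vbios does not support it, and -1 when the vbios
 * supports it but the hardware does not.
 */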
static int gddr6_mem_train_support(struct amdgpu_device *adev)
{
	int ret;
	uint32_t major, minor, revision, hw_v;

	if (gddr6_mem_train_vbios_support(adev)) {
		amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision);
		hw_v = HW_REV(major, minor, revision);
		/*
		 * Treat revision 0 as a special case: on some Navi10 A0 the
		 * registers for MP0 and MMHUB are missing, so IP discovery
		 * initializes none of the hwip functions; this should not
		 * cause any problems.
		 */
		switch (hw_v) {
		case HW_REV(11, 0, 0):
		case HW_REV(11, 0, 5):
			ret = 1;
			break;
		default:
			DRM_ERROR("memory training supported by vbios but not by psp hw(%08x)!\n",
				  hw_v);
			ret = -1;
			break;
		}
	} else {
		ret = 0;
		hw_v = -1;
	}

	DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret);
	return ret;
}
int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	unsigned char *bios = ctx->bios;
	struct vram_reserve_block *reserved_block;
	int index, block_number;
	uint8_t frev, crev;
	uint16_t data_offset, size;
	uint32_t start_address_in_kb;
	uint64_t offset;
	int ret;

	adev->fw_vram_usage.mem_train_support = false;

	if (adev->asic_type != CHIP_NAVI10 &&
	    adev->asic_type != CHIP_NAVI14)
		return 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	ret = gddr6_mem_train_support(adev);
	if (ret == -1)
		return -EINVAL;
	else if (ret == 0)
		return 0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    vram_usagebyfirmware);
	ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev,
					    &data_offset);
	if (ret == 0) {
		DRM_ERROR("parse data header failed.\n");
		return -EINVAL;
	}

	DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x,"
		  " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset);
	/* only support 2.1+ */
	if (((uint16_t)frev << 8 | crev) < 0x0201) {
		DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev);
		return -EINVAL;
	}

	reserved_block = (struct vram_reserve_block *)
		(bios + data_offset + sizeof(struct atom_common_table_header));
	block_number = ((unsigned int)size - sizeof(struct atom_common_table_header))
		/ sizeof(struct vram_reserve_block);
	reserved_block += (block_number > 0) ? block_number - 1 : 0;
	DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n",
		  block_number,
		  le32_to_cpu(reserved_block->start_address_in_kb),
		  le16_to_cpu(reserved_block->used_by_firmware_in_kb),
		  le16_to_cpu(reserved_block->used_by_driver_in_kb));
	if (reserved_block->used_by_firmware_in_kb > 0) {
		start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb);
		offset = (uint64_t)start_address_in_kb * ONE_KiB;
		if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1))
			offset -= ONE_MiB;

		offset &= ~(ONE_MiB - 1);
		adev->fw_vram_usage.mem_train_fb_loc = offset;
		adev->fw_vram_usage.mem_train_support = true;
		DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset);
		ret = 0;
	} else {
		DRM_ERROR("used_by_firmware_in_kb is 0!\n");
		ret = -EINVAL;
	}

	return ret;
}