/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}
/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev,
			"Error during ACPI methods call\n");

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
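
/*
 * amdgpu_firmware_info() is the single source for firmware and feature
 * version reporting: it backs both the AMDGPU_INFO_FW_VERSION ioctl query
 * below and the amdgpu_firmware_info debugfs file at the end of this file.
 */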
/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, j, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->late_init_work);
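
	/*
	 * Each query below writes its result back through
	 * info->return_pointer, truncated to min(info->return_size,
	 * sizeof(result)), so older userspace built against a smaller
	 * result struct still receives a consistent prefix.
	 */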
	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = 32;
			ib_size_alignment = 32;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			for (i = 0; i < adev->sdma.num_instances; i++)
				ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
			ib_start_alignment = 256;
			ib_size_alignment = 4;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
				ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < adev->vce.num_rings; i++)
				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = 4;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			for (i = 0; i < adev->uvd.num_uvd_inst; i++)
				for (j = 0; j < adev->uvd.num_enc_rings; j++)
					ring_mask |=
					((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
					(j + i * adev->uvd.num_enc_rings));
			ib_start_alignment = 64;
			ib_size_alignment = 64;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			for (i = 0; i < adev->vcn.num_enc_rings; i++)
				ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
			ib_start_alignment = 64;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
			ib_start_alignment = 16;
			ib_size_alignment = 16;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
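	/*
	 * Note that AMDGPU_HW_IP_GFX and AMDGPU_HW_IP_COMPUTE both map to
	 * AMD_IP_BLOCK_TYPE_GFX above, so HW_IP_COUNT reports the same IP
	 * block count for either query.
	 */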
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
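	/*
	 * Both VRAM_GTT and the MEMORY query below report sizes with
	 * currently pinned allocations already subtracted, so userspace
	 * sees only what it can actually allocate from at query time.
	 */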
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
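	/*
	 * amdgpu_asic_read_register() resolves to the per-ASIC
	 * read_register callback, which rejects any dword offset that is
	 * not on that ASIC's list of registers safe to expose, hence the
	 * "unallowed offset" rejection above.
	 */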
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;

		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_VA_HOLE_START);

		if (vm_size > AMDGPU_VA_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
			dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i = 0;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					min((size_t)size, sizeof(bios_size)))
					? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			unsigned char *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
					info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
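
/*
 * Userspace reaches amdgpu_info_ioctl() through libdrm. A minimal sketch
 * of a query from the other side (assuming libdrm's drmCommandWrite() and
 * the uapi header amdgpu_drm.h):
 *
 *	struct drm_amdgpu_info request = {};
 *	uint32_t accel_working = 0;
 *
 *	request.query = AMDGPU_INFO_ACCEL_WORKING;
 *	request.return_pointer = (uintptr_t)&accel_working;
 *	request.return_size = sizeof(accel_working);
 *	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 */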
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}
/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (adev->asic_type != CHIP_RAVEN) {
		amdgpu_uvd_free_handles(adev, file_priv);
		amdgpu_vce_free_handles(adev, file_priv);
	}

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_vm_fini(adev, &fpriv->vm);
	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}
/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
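
/*
 * Ioctl access levels: DRM_AUTH requires an authenticated client,
 * DRM_RENDER_ALLOW additionally exposes the ioctl on render nodes
 * (/dev/dri/renderD*), and DRM_MASTER restricts AMDGPU_SCHED to the DRM
 * master, since it can override the scheduling priority of other clients.
 */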
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}