/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		return;

	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);
	amdgpu_device_fini(adev);
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more GPU instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
	struct drm_device *dev;
	int r, acpi_status;

	dev = adev_to_drm(adev);

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal errors
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure; it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation.
	 */
	r = amdgpu_device_init(adev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	if (amdgpu_device_supports_boco(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
		adev->runpm = true;
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
#endif
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
			/* enable runpm if runpm=1 */
			if (amdgpu_runtime_pm > 0)
				adev->runpm = true;
			break;
		case CHIP_VEGA10:
			/* turn runpm on if noretry=0 */
			if (!adev->gmc.noretry)
				adev->runpm = true;
			break;
		default:
			/* enable runpm on VI+ */
			adev->runpm = true;
			break;
		}
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */

	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	if (adev->runpm) {
		/* only need to skip on ATPX */
		if (amdgpu_device_supports_boco(dev) &&
		    !amdgpu_is_atpx_hybrid())
			dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
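
/*
 * amdgpu_firmware_info - fill a drm_amdgpu_info_firmware record
 *
 * Translates the AMDGPU_INFO_FW_* type (and, for multi-instance blocks such
 * as MEC, TA and SDMA, the index) in @query_fw into the loaded firmware and
 * feature versions tracked in @adev. Returns 0 on success, -EINVAL for an
 * unknown type or an out-of-range index.
 */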
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
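
/*
 * amdgpu_hw_ip_info - answer an AMDGPU_INFO_HW_IP_INFO query
 *
 * Counts the rings of the requested IP type that are ready for submission,
 * reports them as a bitmask in @result->available_rings, and fills in the IP
 * block version plus the IB start/size alignment userspace has to obey.
 */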
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;

	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm file
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
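	/*
	 * The VRAM_GTT and MEMORY queries below report heap sizes with pinned
	 * and driver-reserved memory already subtracted; max_allocation is a
	 * soft cap of 3/4 of the usable heap advertised for a single
	 * allocation.
	 */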
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;
		struct ttm_resource_manager *vram_man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
		struct ttm_resource_manager *gtt_man =
			ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(vram_man);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(vram_man);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = gtt_man->size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(gtt_man);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
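	/*
	 * MMIO register reads temporarily disable GFXOFF so the GFX block is
	 * guaranteed to be powered while the registers are sampled.
	 */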
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		else if (se_num >= AMDGPU_GFX_MAX_SE)
			return -EINVAL;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;
		else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
			return -EINVAL;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
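	/*
	 * DEV_INFO is a static snapshot of the chip: IDs, shader/CU layout and
	 * the usable GPU virtual address ranges; clock values are in KHz.
	 */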
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info;
		uint64_t vm_size;

		memset(&dev_info, 0, sizeof(dev_info));
		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;

		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		if (amdgpu_is_tmz(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_TMZ;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					min((size_t)size, sizeof(bios_size)))
					? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
					info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
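	/*
	 * Sensor queries go through the DPM/SMU sensor interface; values are
	 * converted below so userspace sees MHz for clocks, millidegrees C
	 * for temperature and millivolts for voltages.
	 */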
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				min_t(u64, size, sizeof(ras_mask))) ?
				-EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		goto pm_put;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
pm_put:
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	u32 pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
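
/* KMS ioctl table; entries marked DRM_RENDER_ALLOW are usable on render nodes */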
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
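
/*
 * Debugfs
 */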
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->gfx.mec2_fw) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP TA (XGMI and RAS) */
	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
			   i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}