/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#if defined(CONFIG_VGA_SWITCHEROO)
bool amdgpu_has_atpx(void);
#else
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
int amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return 0;
	if (adev->rmmio == NULL)
		goto done_free;
	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}
	amdgpu_amdkfd_device_fini(adev);
	amdgpu_acpi_fini(adev);
	amdgpu_device_fini(adev);
done_free:
	kfree(adev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal.
	 */
	acpi_status = amdgpu_acpi_init(adev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev,
			"Error during ACPI methods call\n");

	amdgpu_amdkfd_load_interface(adev);
	amdgpu_amdkfd_device_probe(adev);
	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
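
/*
 * amdgpu_firmware_info - fill a drm_amdgpu_info_firmware record for one
 * firmware block from the versions cached in the amdgpu_device.  Shared by
 * the AMDGPU_INFO_FW_VERSION query below and by the amdgpu_firmware_info
 * debugfs file at the end of this file.
 */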
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->mc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
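/*
 * Illustrative sketch (not part of this driver) of how userspace typically
 * reaches this handler through the DRM_AMDGPU_INFO command; field names
 * follow struct drm_amdgpu_info from amdgpu_drm.h, and the libdrm wrapper
 * used (drmCommandWrite() here, or amdgpu_query_info()) may differ:
 *
 *	struct drm_amdgpu_info request = {};
 *	uint32_t accel = 0;
 *
 *	request.return_pointer = (uintptr_t)&accel;
 *	request.return_size = sizeof(accel);
 *	request.query = AMDGPU_INFO_ACCEL_WORKING;
 *	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
 */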
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(long)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			for (i = 0; i < adev->sdma.num_instances; i++)
				ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			ring_mask = adev->uvd.ring.ready ? 1 : 0;
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 16;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < adev->vce.num_rings; i++)
				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = atomic64_read(&adev->vram_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = atomic64_read(&adev->vram_vis_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = atomic64_read(&adev->gtt_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->mc.real_vram_size;
		vram_gtt.vram_size -= adev->vram_pin_size;
		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
		vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
		vram_gtt.gtt_size = adev->mc.gtt_size;
		vram_gtt.gtt_size -= adev->gart_pin_size;
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->mc.real_vram_size;
		mem.vram.usable_heap_size =
			adev->mc.real_vram_size - adev->vram_pin_size;
		mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->mc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			adev->mc.visible_vram_size -
			(adev->vram_pin_size - adev->invisible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			atomic64_read(&adev->vram_vis_usage);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mc.gtt_size;
		mem.gtt.usable_heap_size =
			adev->mc.gtt_size - adev->gart_pin_size;
		mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->pm.default_sclk * 10;
			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;

		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
					     AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;

		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->mc.vram_type;
		dev_info.vram_bit_width = adev->mc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size))) ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;
			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset))) ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	amdgpu_fbdev_restore_mode(adev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r;

	file_priv->driver_priv = NULL;
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;
	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm);
	if (r) {
		kfree(fpriv);
		goto out_suspend;
	}
	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);
	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
	file_priv->driver_priv = fpriv;
out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_uvd_free_handles(adev, file_priv);
	amdgpu_vce_free_handles(adev, file_priv);
	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);
	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/**
 * amdgpu_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx (all asics).
 */
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	pm_runtime_get_sync(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = amdgpu_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position (all asics).
 * Returns positive status flags on success, negative error on failure.
 */
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *crtc;
	struct amdgpu_device *adev = dev->dev_private;

	if (pipe >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	crtc = &adev->mode_info.crtcs[pipe]->base;
	if (crtc == NULL) {
		/* This can occur on driver load if some component fails to
		 * initialize completely and driver is unloaded */
		DRM_ERROR("Uninitialized crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
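
/*
 * Every entry above is flagged DRM_AUTH|DRM_RENDER_ALLOW: on the primary
 * node the ioctl requires an authenticated DRM client, while
 * DRM_RENDER_ALLOW also makes it available on render nodes (/dev/dri/renderD*).
 */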

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	int ret, i;

	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}