2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
26 #include <linux/kthread.h>
28 #include <linux/debugfs.h>
/*
 * amdgpu_debugfs_add_files - register a drm_info_list table with the
 * device's debugfs bookkeeping and (when CONFIG_DEBUG_FS) with DRM.
 *
 * NOTE(review): extraction dropped several original lines here (the
 * embedded numbering jumps), so early-return paths, the function's
 * opening/closing braces and the local declarations are not visible.
 */
34 int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
35 const struct drm_info_list *files,
/* Reject duplicate registration of the same table pointer. */
40 for (i = 0; i < adev->debugfs_count; i++) {
41 if (adev->debugfs[i].files == files) {
42 /* Already registered */
/* New component count after appending this table. */
47 i = adev->debugfs_count + 1;
48 if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
49 DRM_ERROR("Reached maximum number of debugfs components.\n");
50 DRM_ERROR("Report so we increase "
51 "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
/* Record the table and its length in the next free slot. */
54 adev->debugfs[adev->debugfs_count].files = files;
55 adev->debugfs[adev->debugfs_count].num_files = nfiles;
56 adev->debugfs_count = i;
57 #if defined(CONFIG_DEBUG_FS)
/* Hand the entries to DRM so they appear under the primary minor. */
58 drm_debugfs_create_files(files, nfiles,
59 adev->ddev->primary->debugfs_root,
65 #if defined(CONFIG_DEBUG_FS)
/*
 * amdgpu_debugfs_process_reg_op - common worker for the amdgpu_regs
 * debugfs read and write paths.
 *
 * The file offset (*pos) is bit-encoded:
 *   bit 62        - select a specific SE/SH/instance bank (GRBM)
 *   bit 61        - select a specific me/pipe/queue (SRBM)
 *   bit 23        - take the PM mutex around the access ("PG lock")
 *   bits 53..24   - bank/ring selector fields (see GENMASK extraction
 *                   below); bits 62 and 61 are mutually exclusive.
 *   bits 21..0    - the register byte offset actually accessed.
 *
 * NOTE(review): extraction dropped many original lines (error returns,
 * the copy loop, unlock ordering, braces), so only the skeleton of the
 * logic is visible in this copy.
 */
68 static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
69 char __user *buf, size_t size, loff_t *pos)
71 struct amdgpu_device *adev = file_inode(f)->i_private;
74 bool pm_pg_lock, use_bank, use_ring;
75 unsigned instance_bank, sh_bank, se_bank, me, pipe, queue;
77 pm_pg_lock = use_bank = use_ring = false;
78 instance_bank = sh_bank = se_bank = me = pipe = queue = 0;
/* Accesses must be 4-byte sized/aligned; bank and ring select
 * (bits 62 and 61) cannot be requested at the same time. */
80 if (size & 0x3 || *pos & 0x3 ||
81 ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
84 /* are we reading registers for which a PG lock is necessary? */
85 pm_pg_lock = (*pos >> 23) & 1;
/* Bank select: decode se/sh/instance from the offset bits. */
87 if (*pos & (1ULL << 62)) {
88 se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
89 sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
90 instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
/* 0x3FF in a selector field means "broadcast to all". */
96 if (instance_bank == 0x3FF)
97 instance_bank = 0xFFFFFFFF;
/* Ring select: decode me/pipe/queue from the same bit ranges. */
99 } else if (*pos & (1ULL << 61)) {
101 me = (*pos & GENMASK_ULL(33, 24)) >> 24;
102 pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
103 queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
107 use_bank = use_ring = 0;
/* Strip the flag bits, leaving only the register byte offset. */
110 *pos &= (1UL << 22) - 1;
/* Validate the requested bank against the hardware topology
 * (0xFFFFFFFF means broadcast and is always allowed). */
113 if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
114 (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
116 mutex_lock(&adev->grbm_idx_mutex);
117 amdgpu_gfx_select_se_sh(adev, se_bank,
118 sh_bank, instance_bank);
119 } else if (use_ring) {
120 mutex_lock(&adev->srbm_mutex);
121 amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue);
125 mutex_lock(&adev->pm.mutex);
/* Never run past the MMIO aperture. */
130 if (*pos > adev->rmmio_size)
/* One dword per iteration: MMIO -> user on read, user -> MMIO
 * on write (loop framing dropped by extraction). */
134 value = RREG32(*pos >> 2);
135 r = put_user(value, (uint32_t *)buf);
137 r = get_user(value, (uint32_t *)buf);
139 WREG32(*pos >> 2, value);
/* Restore broadcast selection and drop the locks taken above. */
154 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
155 mutex_unlock(&adev->grbm_idx_mutex);
156 } else if (use_ring) {
157 amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0);
158 mutex_unlock(&adev->srbm_mutex);
162 mutex_unlock(&adev->pm.mutex);
168 static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
169 size_t size, loff_t *pos)
171 return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
174 static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
175 size_t size, loff_t *pos)
177 return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
/*
 * amdgpu_debugfs_regs_pcie_read - stream PCIE indirect registers to
 * userspace, one dword per iteration via RREG32_PCIE.
 *
 * NOTE(review): extraction dropped the loop framing, error returns and
 * braces (embedded numbering jumps 194 -> 207).
 */
180 static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
181 size_t size, loff_t *pos)
183 struct amdgpu_device *adev = file_inode(f)->i_private;
/* Require 4-byte sized and aligned accesses. */
187 if (size & 0x3 || *pos & 0x3)
193 value = RREG32_PCIE(*pos >> 2);
194 r = put_user(value, (uint32_t *)buf);
/*
 * amdgpu_debugfs_regs_pcie_write - write dwords from userspace into
 * PCIE indirect registers via WREG32_PCIE.
 *
 * NOTE(review): loop framing, error returns and braces were dropped by
 * extraction (numbering jumps 224 -> 235).
 */
207 static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
208 size_t size, loff_t *pos)
210 struct amdgpu_device *adev = file_inode(f)->i_private;
/* Require 4-byte sized and aligned accesses. */
214 if (size & 0x3 || *pos & 0x3)
220 r = get_user(value, (uint32_t *)buf);
224 WREG32_PCIE(*pos >> 2, value);
/*
 * amdgpu_debugfs_regs_didt_read - stream DIDT indirect registers to
 * userspace, one dword per iteration via RREG32_DIDT.
 *
 * NOTE(review): loop framing, error returns and braces were dropped by
 * extraction.
 */
235 static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
236 size_t size, loff_t *pos)
238 struct amdgpu_device *adev = file_inode(f)->i_private;
/* Require 4-byte sized and aligned accesses. */
242 if (size & 0x3 || *pos & 0x3)
248 value = RREG32_DIDT(*pos >> 2);
249 r = put_user(value, (uint32_t *)buf);
/*
 * amdgpu_debugfs_regs_didt_write - write dwords from userspace into
 * DIDT indirect registers via WREG32_DIDT.
 *
 * NOTE(review): loop framing, error returns and braces were dropped by
 * extraction.
 */
262 static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
263 size_t size, loff_t *pos)
265 struct amdgpu_device *adev = file_inode(f)->i_private;
/* Require 4-byte sized and aligned accesses. */
269 if (size & 0x3 || *pos & 0x3)
275 r = get_user(value, (uint32_t *)buf);
279 WREG32_DIDT(*pos >> 2, value);
/*
 * amdgpu_debugfs_regs_smc_read - stream SMC registers to userspace via
 * RREG32_SMC.  Note: unlike the MMIO/PCIE/DIDT paths, the SMC accessor
 * is passed *pos directly (no >> 2) — the SMC interface is
 * byte-addressed here.
 *
 * NOTE(review): loop framing, error returns and braces were dropped by
 * extraction.
 */
290 static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
291 size_t size, loff_t *pos)
293 struct amdgpu_device *adev = file_inode(f)->i_private;
/* Require 4-byte sized and aligned accesses. */
297 if (size & 0x3 || *pos & 0x3)
303 value = RREG32_SMC(*pos);
304 r = put_user(value, (uint32_t *)buf);
/*
 * amdgpu_debugfs_regs_smc_write - write dwords from userspace into SMC
 * registers via WREG32_SMC (byte-addressed: *pos is not shifted).
 *
 * NOTE(review): loop framing, error returns and braces were dropped by
 * extraction.
 */
317 static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
318 size_t size, loff_t *pos)
320 struct amdgpu_device *adev = file_inode(f)->i_private;
/* Require 4-byte sized and aligned accesses. */
324 if (size & 0x3 || *pos & 0x3)
330 r = get_user(value, (uint32_t *)buf);
334 WREG32_SMC(*pos, value);
/*
 * amdgpu_debugfs_gca_config_read - dump a versioned snapshot of the GFX
 * configuration (adev->gfx.config plus chip/PCI identification) as an
 * array of dwords.
 *
 * The first dword is a format version (3 here); it must be bumped every
 * time a field is appended so userspace can parse older/newer layouts.
 *
 * NOTE(review): extraction dropped the kmalloc NULL check, the copy
 * loop framing, kfree and the return path (numbering gaps below).
 */
345 static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
346 size_t size, loff_t *pos)
348 struct amdgpu_device *adev = file_inode(f)->i_private;
351 uint32_t *config, no_regs = 0;
/* Require 4-byte sized and aligned accesses. */
353 if (size & 0x3 || *pos & 0x3)
/* Scratch buffer for the snapshot; 256 dwords is the layout cap. */
356 config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
360 /* version, increment each time something is added */
361 config[no_regs++] = 3;
362 config[no_regs++] = adev->gfx.config.max_shader_engines;
363 config[no_regs++] = adev->gfx.config.max_tile_pipes;
364 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
365 config[no_regs++] = adev->gfx.config.max_sh_per_se;
366 config[no_regs++] = adev->gfx.config.max_backends_per_se;
367 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
368 config[no_regs++] = adev->gfx.config.max_gprs;
369 config[no_regs++] = adev->gfx.config.max_gs_threads;
370 config[no_regs++] = adev->gfx.config.max_hw_contexts;
371 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
372 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
373 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
374 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
375 config[no_regs++] = adev->gfx.config.num_tile_pipes;
376 config[no_regs++] = adev->gfx.config.backend_enable_mask;
377 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
378 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
379 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
380 config[no_regs++] = adev->gfx.config.num_gpus;
381 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
382 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
383 config[no_regs++] = adev->gfx.config.gb_addr_config;
384 config[no_regs++] = adev->gfx.config.num_rbs;
/* Chip identification and feature flags. */
387 config[no_regs++] = adev->rev_id;
388 config[no_regs++] = adev->pg_flags;
389 config[no_regs++] = adev->cg_flags;
392 config[no_regs++] = adev->family;
393 config[no_regs++] = adev->external_rev_id;
/* PCI identification. */
396 config[no_regs++] = adev->pdev->device;
397 config[no_regs++] = adev->pdev->revision;
398 config[no_regs++] = adev->pdev->subsystem_device;
399 config[no_regs++] = adev->pdev->subsystem_vendor;
/* Copy out one dword at a time until the snapshot is exhausted. */
401 while (size && (*pos < no_regs * 4)) {
404 value = config[*pos >> 2];
405 r = put_user(value, (uint32_t *)buf);
/*
 * amdgpu_debugfs_sensor_read - read one power-management sensor value
 * (or value set) selected by the file offset, via the DPM read_sensor
 * callback.
 *
 * NOTE(review): extraction dropped the 'values' declaration, the
 * offset->idx conversion, error returns and the copy-loop framing.
 */
421 static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
422 size_t size, loff_t *pos)
424 struct amdgpu_device *adev = file_inode(f)->i_private;
425 int idx, x, outsize, r, valuesize;
/* Require 4-byte sized and aligned accesses. */
428 if (size & 3 || *pos & 0x3)
434 /* convert offset to sensor number */
437 valuesize = sizeof(values);
/* Only query hardware when the powerplay backend provides a
 * read_sensor hook. */
438 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
439 r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
/* Clamp the user request to what the sensor actually produced. */
443 if (size > valuesize)
450 r = put_user(values[x++], (int32_t *)buf);
/* On success report bytes produced, otherwise the error code. */
457 return !r ? outsize : r;
/*
 * amdgpu_debugfs_wave_read - dump wave status for one SIMD wave.
 *
 * The file offset is bit-packed: bits 6..0 byte offset into the dump,
 * 14..7 SE, 22..15 SH, 30..23 CU, 36..31 wave id, 44..37 SIMD id.
 * The SE/SH/CU are selected under grbm_idx_mutex, the per-wave data is
 * fetched through the gfx read_wave_data callback, then broadcast
 * selection is restored.
 *
 * NOTE(review): extraction dropped local declarations (r, x, value),
 * error returns and loop framing.
 */
460 static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
461 size_t size, loff_t *pos)
463 struct amdgpu_device *adev = f->f_inode->i_private;
466 uint32_t offset, se, sh, cu, wave, simd, data[32];
/* Require 4-byte sized and aligned accesses. */
468 if (size & 3 || *pos & 3)
/* Decode the bit-packed selector from the file offset. */
472 offset = (*pos & GENMASK_ULL(6, 0));
473 se = (*pos & GENMASK_ULL(14, 7)) >> 7;
474 sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
475 cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
476 wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
477 simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
479 /* switch to the specific se/sh/cu */
480 mutex_lock(&adev->grbm_idx_mutex);
481 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
484 if (adev->gfx.funcs->read_wave_data)
485 adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
/* Restore broadcast selection before releasing the mutex. */
487 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
488 mutex_unlock(&adev->grbm_idx_mutex);
/* Copy out one dword at a time; x is the dword count produced by
 * read_wave_data. */
493 while (size && (offset < x * 4)) {
496 value = data[offset >> 2];
497 r = put_user(value, (uint32_t *)buf);
/*
 * amdgpu_debugfs_gpr_read - dump VGPRs or SGPRs for one wave.
 *
 * The file offset is bit-packed: bits 11..0 register offset, 19..12 SE,
 * 27..20 SH, 35..28 CU, 43..36 wave, 51..44 SIMD, 59..52 thread,
 * 61..60 bank (bank selects VGPR vs SGPR dump — the exact encoding of
 * the selector is in lines dropped by extraction).
 *
 * NOTE(review): no bounds check is visible ensuring offset/size fit the
 * 1024-dword buffer allocated below — the check may live in the dropped
 * lines; verify against the full source.  The kmalloc NULL check, error
 * returns, kfree and loop framing were also dropped.
 */
510 static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
511 size_t size, loff_t *pos)
513 struct amdgpu_device *adev = f->f_inode->i_private;
516 uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
/* Require 4-byte sized and aligned accesses. */
518 if (size & 3 || *pos & 3)
/* Decode the bit-packed selector from the file offset. */
522 offset = *pos & GENMASK_ULL(11, 0);
523 se = (*pos & GENMASK_ULL(19, 12)) >> 12;
524 sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
525 cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
526 wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
527 simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
528 thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
529 bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
/* Scratch buffer for the register dump (1024 dwords). */
531 data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
535 /* switch to the specific se/sh/cu */
536 mutex_lock(&adev->grbm_idx_mutex);
537 amdgpu_gfx_select_se_sh(adev, se, sh, cu);
540 if (adev->gfx.funcs->read_wave_vgprs)
541 adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
543 if (adev->gfx.funcs->read_wave_sgprs)
544 adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
/* Restore broadcast selection before releasing the mutex. */
547 amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
548 mutex_unlock(&adev->grbm_idx_mutex);
/* Copy the dump out one dword at a time. */
553 value = data[offset++];
554 r = put_user(value, (uint32_t *)buf);
570 static const struct file_operations amdgpu_debugfs_regs_fops = {
571 .owner = THIS_MODULE,
572 .read = amdgpu_debugfs_regs_read,
573 .write = amdgpu_debugfs_regs_write,
574 .llseek = default_llseek
576 static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
577 .owner = THIS_MODULE,
578 .read = amdgpu_debugfs_regs_didt_read,
579 .write = amdgpu_debugfs_regs_didt_write,
580 .llseek = default_llseek
582 static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
583 .owner = THIS_MODULE,
584 .read = amdgpu_debugfs_regs_pcie_read,
585 .write = amdgpu_debugfs_regs_pcie_write,
586 .llseek = default_llseek
588 static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
589 .owner = THIS_MODULE,
590 .read = amdgpu_debugfs_regs_smc_read,
591 .write = amdgpu_debugfs_regs_smc_write,
592 .llseek = default_llseek
595 static const struct file_operations amdgpu_debugfs_gca_config_fops = {
596 .owner = THIS_MODULE,
597 .read = amdgpu_debugfs_gca_config_read,
598 .llseek = default_llseek
601 static const struct file_operations amdgpu_debugfs_sensors_fops = {
602 .owner = THIS_MODULE,
603 .read = amdgpu_debugfs_sensor_read,
604 .llseek = default_llseek
607 static const struct file_operations amdgpu_debugfs_wave_fops = {
608 .owner = THIS_MODULE,
609 .read = amdgpu_debugfs_wave_read,
610 .llseek = default_llseek
612 static const struct file_operations amdgpu_debugfs_gpr_fops = {
613 .owner = THIS_MODULE,
614 .read = amdgpu_debugfs_gpr_read,
615 .llseek = default_llseek
618 static const struct file_operations *debugfs_regs[] = {
619 &amdgpu_debugfs_regs_fops,
620 &amdgpu_debugfs_regs_didt_fops,
621 &amdgpu_debugfs_regs_pcie_fops,
622 &amdgpu_debugfs_regs_smc_fops,
623 &amdgpu_debugfs_gca_config_fops,
624 &amdgpu_debugfs_sensors_fops,
625 &amdgpu_debugfs_wave_fops,
626 &amdgpu_debugfs_gpr_fops,
629 static const char *debugfs_regs_names[] = {
640 int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
642 struct drm_minor *minor = adev->ddev->primary;
643 struct dentry *ent, *root = minor->debugfs_root;
646 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
647 ent = debugfs_create_file(debugfs_regs_names[i],
648 S_IFREG | S_IRUGO, root,
649 adev, debugfs_regs[i]);
651 for (j = 0; j < i; j++) {
652 debugfs_remove(adev->debugfs_regs[i]);
653 adev->debugfs_regs[i] = NULL;
659 i_size_write(ent->d_inode, adev->rmmio_size);
660 adev->debugfs_regs[i] = ent;
666 void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
670 for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
671 if (adev->debugfs_regs[i]) {
672 debugfs_remove(adev->debugfs_regs[i]);
673 adev->debugfs_regs[i] = NULL;
/*
 * amdgpu_debugfs_test_ib - debugfs handler that runs the IB ring tests
 * with the GPU schedulers parked, reporting the result via seq_file.
 *
 * NOTE(review): extraction dropped local declarations (r, i), the
 * 'continue' statements after the ring checks, the return statement and
 * braces.
 */
678 static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
680 struct drm_info_node *node = (struct drm_info_node *) m->private;
681 struct drm_device *dev = node->minor->dev;
682 struct amdgpu_device *adev = dev->dev_private;
685 /* hold on the scheduler */
686 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
687 struct amdgpu_ring *ring = adev->rings[i];
/* Skip unused rings and rings without a scheduler thread. */
689 if (!ring || !ring->sched.thread)
691 kthread_park(ring->sched.thread);
694 seq_printf(m, "run ib test:\n");
695 r = amdgpu_ib_ring_tests(adev);
697 seq_printf(m, "ib ring tests failed (%d).\n", r);
699 seq_printf(m, "ib ring tests passed.\n");
701 /* go on the scheduler */
702 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
703 struct amdgpu_ring *ring = adev->rings[i];
705 if (!ring || !ring->sched.thread)
707 kthread_unpark(ring->sched.thread);
713 static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
715 struct drm_info_node *node = (struct drm_info_node *) m->private;
716 struct drm_device *dev = node->minor->dev;
717 struct amdgpu_device *adev = dev->dev_private;
719 seq_write(m, adev->bios, adev->bios_size);
723 static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
725 struct drm_info_node *node = (struct drm_info_node *)m->private;
726 struct drm_device *dev = node->minor->dev;
727 struct amdgpu_device *adev = dev->dev_private;
729 seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
733 static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
735 struct drm_info_node *node = (struct drm_info_node *)m->private;
736 struct drm_device *dev = node->minor->dev;
737 struct amdgpu_device *adev = dev->dev_private;
739 seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
743 static const struct drm_info_list amdgpu_debugfs_list[] = {
744 {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
745 {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
746 {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
747 {"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
750 int amdgpu_debugfs_init(struct amdgpu_device *adev)
752 return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
753 ARRAY_SIZE(amdgpu_debugfs_list));
/*
 * No-op stubs.  NOTE(review): these duplicate names defined above and
 * so presumably sit in the "#else" branch of the CONFIG_DEBUG_FS guard
 * (the preprocessor lines were dropped by extraction — confirm against
 * the full source).  Truncated bodies reconstructed as trivial returns.
 */
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}

int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}

void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }