// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 */
/* For profiling, userspace can:
 *
 *   tail -f /sys/kernel/debug/dri/<minor>/gpu
 *
 * This will enable performance counters/profiling to track the busy time
 * and any gpu specific performance counters that are supported.
 */
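/*
 * Output format (illustrative values; actual counter names depend on the
 * GPU): a header line is emitted on the first sample and then every 32
 * samples, followed by one tab-separated line per sample, e.g.:
 *
 *   %BUSY   <cntr0>   <cntr1>  ...
 *    42.5%   400.00    123.45  ...
 */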
#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
struct msm_perf_state {
	struct drm_device *dev;

	bool open;
	int cnt;
	struct mutex read_lock;

	/* formatted output text and read cursor: */
	char buf[256];
	int buftot, bufpos;

	unsigned long next_jiffies;
};
/* sample period: HZ/4 jiffies == 250ms */
#define SAMPLE_TIME (HZ/4)
/* wait for next sample time: */
static int wait_sample(struct msm_perf_state *perf)
{
	unsigned long start_jiffies = jiffies;

	if (time_after(perf->next_jiffies, start_jiffies)) {
		unsigned long remaining_jiffies =
			perf->next_jiffies - start_jiffies;
		int ret = schedule_timeout_interruptible(remaining_jiffies);
		if (ret > 0) {
			/* interrupted */
			return -ERESTARTSYS;
		}
	}
	perf->next_jiffies += SAMPLE_TIME;
	return 0;
}
static int refill_buf(struct msm_perf_state *perf)
{
	struct msm_drm_private *priv = perf->dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	char *ptr = perf->buf;
	int rem = sizeof(perf->buf);
	int i, n;

	if ((perf->cnt++ % 32) == 0) {
		/* Header line: */
		n = snprintf(ptr, rem, "%%BUSY");
		ptr += n;
		rem -= n;

		for (i = 0; i < gpu->num_perfcntrs; i++) {
			const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i];
			n = snprintf(ptr, rem, "\t%s", perfcntr->name);
			ptr += n;
			rem -= n;
		}
	} else {
		/* Sample line: */
		uint32_t activetime = 0, totaltime = 0;
		uint32_t cntrs[5];
		uint32_t val;
		int ret;

		/* sleep until next sample time: */
		ret = wait_sample(perf);
		if (ret)
			return ret;

		ret = msm_gpu_perfcntr_sample(gpu, &activetime, &totaltime,
				ARRAY_SIZE(cntrs), cntrs);
		if (ret < 0)
			return ret;

		/* busy time as a percentage with one decimal place: */
		val = totaltime ? 1000 * activetime / totaltime : 0;
		n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10);
		ptr += n;
		rem -= n;

		for (i = 0; i < ret; i++) {
			/* cycle counters (I think).. convert to MHz.. */
			val = cntrs[i] / 10000;
			n = snprintf(ptr, rem, "\t%5d.%02d",
					val / 100, val % 100);
			ptr += n;
			rem -= n;
		}
	}

	n = snprintf(ptr, rem, "\n");
	ptr += n;

	perf->bufpos = 0;
	perf->buftot = ptr - perf->buf;

	return 0;
}
static ssize_t perf_read(struct file *file, char __user *buf,
		size_t sz, loff_t *ppos)
{
	struct msm_perf_state *perf = file->private_data;
	int n = 0, ret = 0;

	mutex_lock(&perf->read_lock);

	/* refill the buffer with a fresh sample once it is fully consumed: */
	if (perf->bufpos >= perf->buftot) {
		ret = refill_buf(perf);
		if (ret)
			goto out;
	}

	n = min((int)sz, perf->buftot - perf->bufpos);
	if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
		ret = -EFAULT;
		goto out;
	}

	perf->bufpos += n;
	*ppos += n;

out:
	mutex_unlock(&perf->read_lock);
	if (ret)
		return ret;

	return n;
}
static int perf_open(struct inode *inode, struct file *file)
{
	struct msm_perf_state *perf = inode->i_private;
	struct drm_device *dev = perf->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/* only one reader at a time, and only if there is a gpu: */
	if (perf->open || !gpu) {
		ret = -EBUSY;
		goto out;
	}

	file->private_data = perf;
	perf->open = true;
	perf->cnt = 0;
	perf->buftot = 0;
	perf->bufpos = 0;
	msm_gpu_perfcntr_start(gpu);
	perf->next_jiffies = jiffies + SAMPLE_TIME;

out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static int perf_release(struct inode *inode, struct file *file)
{
	struct msm_perf_state *perf = inode->i_private;
	struct msm_drm_private *priv = perf->dev->dev_private;

	msm_gpu_perfcntr_stop(priv->gpu);
	perf->open = false;
	return 0;
}
static const struct file_operations perf_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = perf_open,
	.read = perf_read,
	.release = perf_release,
};
int msm_perf_debugfs_init(struct drm_minor *minor)
{
	struct msm_drm_private *priv = minor->dev->dev_private;
	struct msm_perf_state *perf;

	/* only create on first minor: */
	if (priv->perf)
		return 0;

	perf = kzalloc(sizeof(*perf), GFP_KERNEL);
	if (!perf)
		return -ENOMEM;

	perf->dev = minor->dev;

	mutex_init(&perf->read_lock);
	priv->perf = perf;

	debugfs_create_file("perf", S_IFREG | S_IRUGO, minor->debugfs_root,
			perf, &perf_debugfs_fops);
	return 0;
}
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv)
{
	struct msm_perf_state *perf = priv->perf;

	if (!perf)
		return;

	priv->perf = NULL;

	mutex_destroy(&perf->read_lock);
	kfree(perf);
}

#endif