// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2008
 */
#include <linux/kvm_host.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <asm/time.h>
#include <asm-generic/div64.h>

#include "timing.h"
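/*
 * Exit timing bookkeeping for PowerPC KVM: each vcpu keeps per-exit-type
 * count/min/max/sum/sum-of-squares accumulators (in timebase ticks) and
 * exposes them through a per-vcpu debugfs "timing" file created by the
 * e500 hook at the bottom of this file.  Writing 'c' to that file resets
 * the statistics.
 */

/*
 * Reset all accumulators for a vcpu.  last_exit_type is set to the 0xDEAD
 * sentinel so the next exit starts a fresh measurement cycle, and min is
 * seeded with a large value so the first real sample replaces it.
 */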
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
{
	int i;

	/* Take a lock to avoid concurrent updates */
	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.last_exit_type = 0xDEAD;
	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		vcpu->arch.timing_count_type[i] = 0;
		vcpu->arch.timing_max_duration[i] = 0;
		vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
		vcpu->arch.timing_sum_duration[i] = 0;
		vcpu->arch.timing_sum_quad_duration[i] = 0;
	}
	vcpu->arch.timing_last_exit = 0;
	vcpu->arch.timing_exit.tv64 = 0;
	vcpu->arch.timing_last_enter.tv64 = 0;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}
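/*
 * Fold one duration sample (in timebase ticks) into the per-type
 * accumulators.  Keeping sum and sum-of-squares is enough for userspace
 * to derive mean = sum / count and
 * stddev = sqrt(sum_quad / count - (sum / count)^2);
 * a wrap of either 64-bit sum is reported but not corrected.
 */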
static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
{
	u64 old;

	mutex_lock(&vcpu->arch.exit_timing_lock);

	vcpu->arch.timing_count_type[type]++;

	/* running sum of durations; detect and report 64-bit wrap */
	old = vcpu->arch.timing_sum_duration[type];
	vcpu->arch.timing_sum_duration[type] += duration;
	if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
		printk(KERN_ERR "%s - wrap adding sum of durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old, vcpu->arch.timing_sum_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* running sum of squared durations (for variance/stddev) */
	old = vcpu->arch.timing_sum_quad_duration[type];
	vcpu->arch.timing_sum_quad_duration[type] += (duration * duration);
	if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
		printk(KERN_ERR "%s - wrap adding sum of squared durations"
			" old %lld new %lld type %d exit # of type %d\n",
			__func__, old,
			vcpu->arch.timing_sum_quad_duration[type],
			type, vcpu->arch.timing_count_type[type]);
	}

	/* track per-type min/max */
	if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
		vcpu->arch.timing_min_duration[type] = duration;
	if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
		vcpu->arch.timing_max_duration[type] = duration;

	mutex_unlock(&vcpu->arch.exit_timing_lock);
}
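/*
 * Called once per exit, after timing_exit and timing_last_enter have been
 * stamped by the entry/exit paths.  Two samples are logged: host-side time
 * from the previous exit to the following re-entry, filed under the
 * previous exit's type, and guest-side time from that re-entry to the
 * current exit, filed under TIMEINGUEST.
 */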
void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
	u64 exit = vcpu->arch.timing_last_exit;
	u64 enter = vcpu->arch.timing_last_enter.tv64;

	/* save exit time, used next exit when the reenter time is known */
	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
		return; /* skip incomplete cycle (e.g. after reset) */

	/* update statistics for average and standard deviation */
	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
	/* enter -> timing_last_exit is time spent in guest - log this too */
	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
			TIMEINGUEST);
}
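/* Row labels for the debugfs output, indexed by exit type. */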
static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
	[MMIO_EXITS] = "MMIO",
	[SIGNAL_EXITS] = "SIGNAL",
	[ITLB_REAL_MISS_EXITS] = "ITLBREAL",
	[ITLB_VIRT_MISS_EXITS] = "ITLBVIRT",
	[DTLB_REAL_MISS_EXITS] = "DTLBREAL",
	[DTLB_VIRT_MISS_EXITS] = "DTLBVIRT",
	[SYSCALL_EXITS] = "SYSCALL",
	[EMULATED_INST_EXITS] = "EMULINST",
	[EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT",
	[EMULATED_WRTEE_EXITS] = "EMUL_WRTEE",
	[EMULATED_MTSPR_EXITS] = "EMUL_MTSPR",
	[EMULATED_MFSPR_EXITS] = "EMUL_MFSPR",
	[EMULATED_MTMSR_EXITS] = "EMUL_MTMSR",
	[EMULATED_MFMSR_EXITS] = "EMUL_MFMSR",
	[EMULATED_TLBSX_EXITS] = "EMUL_TLBSX",
	[EMULATED_TLBWE_EXITS] = "EMUL_TLBWE",
	[EMULATED_RFI_EXITS] = "EMUL_RFI",
	[EXT_INTR_EXITS] = "EXTINT",
	[HALT_WAKEUP] = "HALT",
	[USR_PR_INST] = "USR_PR_INST",
	[FP_UNAVAIL] = "FP_UNAVAIL",
	[DEBUG_EXITS] = "DEBUG",
	[TIMEINGUEST] = "TIMEINGUEST"
};
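/*
 * seq_file show handler: one row per exit type.  All values are converted
 * from timebase ticks to microseconds via tb_ticks_per_usec.  Note that
 * sum_squared is divided by tb_ticks_per_usec only once, so it is not in
 * microseconds squared; userspace must rescale it before computing a
 * standard deviation.
 */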
static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
{
	struct kvm_vcpu *vcpu = m->private;
	int i;
	u64 min, max, sum, sum_quad;

	seq_puts(m, "type	count	min	max	sum	sum_squared\n");

	for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
		/* convert timebase ticks to microseconds */
		min = vcpu->arch.timing_min_duration[i];
		do_div(min, tb_ticks_per_usec);
		max = vcpu->arch.timing_max_duration[i];
		do_div(max, tb_ticks_per_usec);
		sum = vcpu->arch.timing_sum_duration[i];
		do_div(sum, tb_ticks_per_usec);
		sum_quad = vcpu->arch.timing_sum_quad_duration[i];
		do_div(sum_quad, tb_ticks_per_usec);

		seq_printf(m, "%12s	%10d	%10lld	%10lld	%20lld	%20lld\n",
			   kvm_exit_names[i],
			   vcpu->arch.timing_count_type[i],
			   min, max, sum, sum_quad);
	}
	return 0;
}
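/*
 * Example usage from the host (paths are illustrative; the actual location
 * depends on where the caller's debugfs_dentry lives under the debugfs
 * mount, typically /sys/kernel/debug):
 *
 *   cat <vcpu-debugfs-dir>/timing          # dump the statistics table
 *   echo -n c > <vcpu-debugfs-dir>/timing  # reset all counters
 */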
/* Write 'c' to clear the timing statistics. */
static ssize_t kvmppc_exit_timing_write(struct file *file,
					const char __user *user_buf,
					size_t count, loff_t *ppos)
{
	char c;

	if (get_user(c, user_buf))
		return -EFAULT;

	if (c == 'c') {
		struct seq_file *seqf = file->private_data;
		struct kvm_vcpu *vcpu = seqf->private;
		/* Write does not affect our buffers previously generated with
		 * show. seq_file is locked here to prevent races of init with
		 * a show call. */
		mutex_lock(&seqf->lock);
		kvmppc_init_timing_stats(vcpu);
		mutex_unlock(&seqf->lock);
		return count;
	}

	return -EINVAL;
}
static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
{
	return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}
static const struct file_operations kvmppc_exit_timing_fops = {
	.owner   = THIS_MODULE,
	.open    = kvmppc_exit_timing_open,
	.read    = seq_read,
	.write   = kvmppc_exit_timing_write,
	.llseek  = seq_lseek,
	.release = single_release,
};
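/*
 * Hook called when a vcpu's debugfs directory is created.  Mode 0666 makes
 * the file readable and writable by any user that can reach debugfs, which
 * is normally restricted to root by the mount permissions.
 */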
int kvmppc_create_vcpu_debugfs_e500(struct kvm_vcpu *vcpu,
				    struct dentry *debugfs_dentry)
{
	debugfs_create_file("timing", 0666, debugfs_dentry,
			    vcpu, &kvmppc_exit_timing_fops);
	return 0;
}