/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | \
                                          MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
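
/*
 * Worked example (illustrative, not part of the original source): for a
 * guest IA32_FIXED_CTR_CTRL value of 0xb0, fixed counter 1 is enabled for
 * ring 0 and ring 3 with PMI on overflow, i.e.
 * fixed_ctrl_field(0xb0, 1) == 0xb, while fixed counter 0 is fully
 * disabled: fixed_ctrl_field(0xb0, 0) == 0x0.
 */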

#define VMWARE_BACKDOOR_PMC_HOST_TSC            0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME           0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME       0x10002

struct kvm_event_hw_type_mapping {
        u8 eventsel;
        u8 unit_mask;
        unsigned event_type;
};

struct kvm_pmu_ops {
        bool (*hw_event_available)(struct kvm_pmc *pmc);
        bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
        struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
        struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
                unsigned int idx, u64 *mask);
        struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
        bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
        bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
        void (*refresh)(struct kvm_vcpu *vcpu);
        void (*init)(struct kvm_vcpu *vcpu);
        void (*reset)(struct kvm_vcpu *vcpu);
        void (*deliver_pmi)(struct kvm_vcpu *vcpu);
        void (*cleanup)(struct kvm_vcpu *vcpu);
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
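
/*
 * Illustrative sketch (hypothetical names, not the actual vendor code): each
 * vendor module supplies a struct kvm_pmu_ops and KVM switches the active
 * ops at hardware-setup time, roughly:
 *
 *	static struct kvm_pmu_ops example_pmu_ops = {
 *		.hw_event_available	= example_hw_event_available,
 *		.pmc_is_enabled		= example_pmc_is_enabled,
 *		.msr_idx_to_pmc		= example_msr_idx_to_pmc,
 *		.is_valid_msr		= example_is_valid_msr,
 *		.get_msr		= example_get_msr,
 *		.set_msr		= example_set_msr,
 *		.refresh		= example_refresh,
 *		.init			= example_init,
 *		.reset			= example_reset,
 *	};
 *
 *	kvm_pmu_ops_update(&example_pmu_ops);
 */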

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return pmu->counter_bitmask[pmc->type];
}
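
/*
 * Read the up-to-date guest view of the counter: the value KVM last saved in
 * pmc->counter plus, if a perf_event is active and not paused, the delta
 * accumulated by that event, truncated to the width the guest can observe.
 */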
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
        u64 counter, enabled, running;

        counter = pmc->counter;
        if (pmc->perf_event && !pmc->is_paused)
                counter += perf_event_read_value(pmc->perf_event,
                                                 &enabled, &running);
        /* FIXME: Scaling needed? */
        return counter & pmc_bitmask(pmc);
}

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
                pmc->current_config = 0;
                pmc_to_pmu(pmc)->event_count--;
        }
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                pmc->counter = pmc_read_counter(pmc);
                pmc_release_perf_event(pmc);
        }
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
                                              u64 data)
{
        return !(pmu->global_ctrl_mask & data);
}
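
/*
 * Illustrative note (not from the original source): global_ctrl_mask has a
 * bit set for every bit the guest must not set in IA32_PERF_GLOBAL_CTRL.
 * For example, a vCPU with 4 GP and 3 fixed counters would typically have
 * ~(0xfULL | (0x7ULL << INTEL_PMC_IDX_FIXED)) in global_ctrl_mask, so a
 * guest write of 0x700000003 (fixed counters 0-2 plus GP counters 0-1)
 * would be accepted, while setting any reserved bit would be rejected.
 */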

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                                         u32 base)
{
        if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_gp_counters);

                return &pmu->gp_counters[index];
        }

        return NULL;
}
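
/*
 * Example usage (illustrative sketch, not taken from the vendor code): the
 * same helper resolves both MSR ranges, distinguished by @base:
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);	  (counter MSRs)
 *	if (!pmc)
 *		pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);  (event selects)
 */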

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
        int base = MSR_CORE_PERF_FIXED_CTR0;

        if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_fixed_counters);

                return &pmu->fixed_counters[index];
        }

        return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
        u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

        if (!sample_period)
                sample_period = pmc_bitmask(pmc) + 1;
        return sample_period;
}
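
/*
 * Worked example (illustrative only): hardware counters count up and
 * interrupt on overflow, so the perf sample period is the distance from the
 * current guest counter value to the overflow point.  For a 48-bit counter
 * programmed with -100 (0xffffffffff9c after masking), the period is
 * (-counter_value) & pmc_bitmask(pmc) == 100; a result of 0 falls back to
 * the full counter range, pmc_bitmask(pmc) + 1.
 */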

static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
        if (!pmc->perf_event || pmc->is_paused)
                return;

        perf_event_period(pmc->perf_event,
                          get_sample_period(pmc, pmc->counter));
}
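
/*
 * A counter is "speculatively" in use if the guest has enabled it through
 * its control MSRs (the EN bits in IA32_FIXED_CTR_CTRL for fixed counters,
 * the enable bit in EVNTSELn for GP counters), regardless of whether a host
 * perf_event currently backs it.
 */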
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (pmc_is_fixed(pmc))
                return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
                                        pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

        return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;
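
/*
 * kvm_pmu_cap is a snapshot of the host PMU capabilities reported by perf,
 * taken once at hardware setup and then clamped to what KVM supports: no
 * capabilities at all if the PMU is disabled, architectural PMU version at
 * most 2, and at most KVM_PMC_MAX_FIXED fixed counters.
 */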
static inline void kvm_init_pmu_capability(void)
{
        bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;

        perf_get_x86_pmu_capability(&kvm_pmu_cap);

        /*
         * For Intel, only support guest architectural pmu
         * on a host with architectural pmu.
         */
        if ((is_intel && !kvm_pmu_cap.version) || !kvm_pmu_cap.num_counters_gp)
                enable_pmu = false;

        if (!enable_pmu) {
                memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
                return;
        }

        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
                                             KVM_PMC_MAX_FIXED);
}

void reprogram_counter(struct kvm_pmc *pmc);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */