arch/arm64/kvm/pmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <[email protected]>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/perf/arm_pmuv3.h>

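/*
 * Per-CPU tracking of the events that must be switched between the
 * host and the guest at guest entry/exit.
 */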
static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

/*
 * Given the perf event attributes and system type, determine
 * if we are going to need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
        /*
         * With VHE the guest kernel runs at EL1 and the host at EL2.
         * If user (EL0) is excluded, there is no reason to switch
         * counters.
         */
        if (has_vhe() && attr->exclude_user)
                return false;

        /* Only switch if attributes are different */
        return (attr->exclude_host != attr->exclude_guest);
}

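/*
 * Return the host/guest event state tracked for this CPU.
 */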
struct kvm_pmu_events *kvm_get_pmu_events(void)
{
        return this_cpu_ptr(&kvm_pmu_events);
}

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr)
{
        struct kvm_pmu_events *pmu = kvm_get_pmu_events();

        if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
                return;

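        /* Track the event for whichever world(s) it is allowed to count in. */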
        if (!attr->exclude_host)
                pmu->events_host |= set;
        if (!attr->exclude_guest)
                pmu->events_guest |= set;
}

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u64 clr)
{
        struct kvm_pmu_events *pmu = kvm_get_pmu_events();

        if (!kvm_arm_support_pmu_v3())
                return;

        pmu->events_host &= ~clr;
        pmu->events_guest &= ~clr;
}

/*
 * Read a value directly from PMEVTYPER<idx> where idx is 0-30,
 * or from PMxCFILTR_EL0 where idx is 31-32.
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
        if (idx == ARMV8_PMU_CYCLE_IDX)
                return read_pmccfiltr();
        else if (idx == ARMV8_PMU_INSTR_IDX)
                return read_pmicfiltr();

        return read_pmevtypern(idx);
}

/*
 * Write a value directly to PMEVTYPER<idx> where idx is 0-30,
 * or to PMxCFILTR_EL0 where idx is 31-32.
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
        if (idx == ARMV8_PMU_CYCLE_IDX)
                write_pmccfiltr(val);
        else if (idx == ARMV8_PMU_INSTR_IDX)
                write_pmicfiltr(val);
        else
                write_pmevtypern(idx, val);
}

/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
        u64 typer;
        u32 counter;

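        /* Clear the EL0 exclude filter on each counter in the bitmap. */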
        for_each_set_bit(counter, &events, ARMPMU_MAX_HWEVENTS) {
                typer = kvm_vcpu_pmu_read_evtype_direct(counter);
                typer &= ~ARMV8_PMU_EXCLUDE_EL0;
                kvm_vcpu_pmu_write_evtype_direct(counter, typer);
        }
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
        u64 typer;
        u32 counter;

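        /* Set the EL0 exclude filter on each counter in the bitmap. */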
        for_each_set_bit(counter, &events, ARMPMU_MAX_HWEVENTS) {
                typer = kvm_vcpu_pmu_read_evtype_direct(counter);
                typer |= ARMV8_PMU_EXCLUDE_EL0;
                kvm_vcpu_pmu_write_evtype_direct(counter, typer);
        }
}

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu_events *pmu;
        u64 events_guest, events_host;

        if (!kvm_arm_support_pmu_v3() || !has_vhe())
                return;

        preempt_disable();
        pmu = kvm_get_pmu_events();
        events_guest = pmu->events_guest;
        events_host = pmu->events_host;

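        /* Enable EL0 counting for guest events and disable it for host events. */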
        kvm_vcpu_pmu_enable_el0(events_guest);
        kvm_vcpu_pmu_disable_el0(events_host);
        preempt_enable();
}

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu_events *pmu;
        u64 events_guest, events_host;

        if (!kvm_arm_support_pmu_v3() || !has_vhe())
                return;

        pmu = kvm_get_pmu_events();
        events_guest = pmu->events_guest;
        events_host = pmu->events_host;

        kvm_vcpu_pmu_enable_el0(events_host);
        kvm_vcpu_pmu_disable_el0(events_guest);
}

/*
 * With VHE, keep track of the PMUSERENR_EL0 value for the host EL0 on the pCPU
 * where PMUSERENR_EL0 for the guest is loaded, since PMUSERENR_EL0 is switched
 * to the value for the guest on vcpu_load().  The value for the host EL0
 * will be restored on vcpu_put(), before returning to userspace.
 * This isn't necessary for nVHE, as the register is context switched for
 * every guest enter/exit.
 *
 * Return true if KVM takes care of the register. Otherwise return false.
 */
bool kvm_set_pmuserenr(u64 val)
{
        struct kvm_cpu_context *hctxt;
        struct kvm_vcpu *vcpu;

        if (!kvm_arm_support_pmu_v3() || !has_vhe())
                return false;

        vcpu = kvm_get_running_vcpu();
        if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU))
                return false;

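        /* Stash the host value in the host context; vcpu_put() will restore it. */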
        hctxt = host_data_ptr(host_ctxt);
        ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
        return true;
}

/*
 * If we interrupted the guest to update the host PMU context, make
 * sure we re-apply the guest EL0 state.
 */
void kvm_vcpu_pmu_resync_el0(void)
{
        struct kvm_vcpu *vcpu;

        if (!has_vhe() || !in_interrupt())
                return;

        vcpu = kvm_get_running_vcpu();
        if (!vcpu)
                return;

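        /* Ask the vcpu to re-apply its EL0 PMU state before the next guest entry. */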
        kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
}