/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <[email protected]>
 *   Avi Kivity   <[email protected]>
 *   Marcelo Tosatti <[email protected]>
 *   Xiao Guangrong <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/ratelimit.h>

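/*
 * Human-readable names for the audit hook points.  The array is indexed
 * by the value stored in kvm->arch.audit_point, which audit_printk()
 * below prefixes to every reported inconsistency.
 */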
char const *audit_point_name[] = {
	"pre page fault",
	"post page fault",
	"pre pte write",
	"post pte write",
	"pre sync",
	"post sync"
};

#define audit_printk(kvm, fmt, args...)		\
	printk(KERN_ERR "audit: (%s) error: "	\
	       fmt, audit_point_name[kvm->arch.audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

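/*
 * Recursively visit every shadow PTE reachable from @sp, invoking @fn on
 * each entry and descending into non-leaf entries with a decremented
 * level.
 */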
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn, int level)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 *ent = sp->spt;

		fn(vcpu, ent + i, level);

		if (is_shadow_present_pte(ent[i]) &&
		    !is_last_spte(ent[i], level)) {
			struct kvm_mmu_page *child;

			child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
			__mmu_spte_walk(vcpu, child, fn, level - 1);
		}
	}
}

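/*
 * Walk all shadow page tables of the vcpu: either the single 64-bit root
 * or, for PAE paging, each of the four pae_root entries.
 */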
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu, sp, fn, 2);
		}
	}

	return;
}

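/*
 * Per-shadow-page audit hook, applied by walk_all_active_sps() to every
 * page on kvm->arch.active_mmu_pages.
 */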
typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
		fn(kvm, sp);
}

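/*
 * Check that a present leaf spte maps the host physical address that
 * currently backs its gfn; mismatches are reported via audit_printk().
 */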
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	pfn_t pfn;
	hpa_t hpa;

	sp = page_header(__pa(sptep));

	if (sp->unsync) {
		if (level != PT_PAGE_TABLE_LEVEL) {
			audit_printk(vcpu->kvm, "unsync sp: %p "
				     "level = %d\n", sp, level);
			return;
		}
	}

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}

	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
			     "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
			     hpa, *sptep);
}

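/*
 * Verify that a mapped spte can be found through the rmap of its gfn.
 * Complaints are ratelimited so a widespread inconsistency does not
 * flood the log.
 */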
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	if (!gfn_to_memslot(kvm, gfn)) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
			     (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}

static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);
}

static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));

	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
			     "root.\n", sp);
}

static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int i;

	if (sp->role.level != PT_PAGE_TABLE_LEVEL)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (!is_rmap_spte(sp->spt[i]))
			continue;

		inspect_spte_has_rmap(kvm, sp->spt + i);
	}
}

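/*
 * A synced, non-direct, valid shadow page is expected to be
 * write-protected: report any writable mapping still reachable through
 * the rmap of its gfn.
 */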
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *sptep;
	struct rmap_iterator iter;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slot = gfn_to_memslot(kvm, sp->gfn);
	rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

	for (sptep = rmap_get_first(*rmapp, &iter); sptep;
	     sptep = rmap_get_next(&iter)) {
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
	}
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_sp);
}

static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	audit_sptes_have_rmaps(vcpu, sptep, level);
	audit_mappings(vcpu, sptep, level);
	audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, audit_spte);
}

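/*
 * mmu_audit mirrors the current state of the "mmu_audit" module
 * parameter; mmu_audit_key is a static key, so the branch in
 * kvm_mmu_audit() is patched out and essentially free while auditing is
 * disabled.
 */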
static bool mmu_audit;
static struct static_key mmu_audit_key;

static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	vcpu->kvm->arch.audit_point = point;
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}

static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	if (static_key_false((&mmu_audit_key)))
		__kvm_mmu_audit(vcpu, point);
}

static void mmu_audit_enable(void)
{
	if (mmu_audit)
		return;

	static_key_slow_inc(&mmu_audit_key);
	mmu_audit = true;
}

static void mmu_audit_disable(void)
{
	if (!mmu_audit)
		return;

	static_key_slow_dec(&mmu_audit_key);
	mmu_audit = false;
}

static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	unsigned long enable;

	ret = strict_strtoul(val, 10, &enable);
	if (ret < 0)
		return -EINVAL;

	switch (enable) {
	case 0:
		mmu_audit_disable();
		break;
	case 1:
		mmu_audit_enable();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
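
/*
 * Usage sketch (an assumption about the build layout: this file is pulled
 * into the KVM MMU code and ends up in the kvm module, so the parameter is
 * typically visible as kvm.mmu_audit):
 *
 *   echo 1 > /sys/module/kvm/parameters/mmu_audit    # enable auditing
 *   echo 0 > /sys/module/kvm/parameters/mmu_audit    # disable it again
 *
 * mmu_audit_set() rejects any value other than 0 or 1 with -EINVAL.
 */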