/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/trace_events.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

#define KVM_MMU_PAGE_FIELDS             \
        __field(__u8, mmu_valid_gen)    \
        __field(__u64, gfn)             \
        __field(__u32, role)            \
        __field(__u32, root_count)      \
        __field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp)                         \
        __entry->mmu_valid_gen = sp->mmu_valid_gen;     \
        __entry->gfn = sp->gfn;                         \
        __entry->role = sp->role.word;                  \
        __entry->root_count = sp->root_count;           \
        __entry->unsync = sp->unsync;

#define KVM_MMU_PAGE_PRINTK() ({                                        \
        const char *saved_ptr = trace_seq_buffer_ptr(p);                \
        static const char *access_str[] = {                             \
                "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
        };                                                              \
        union kvm_mmu_page_role role;                                   \
                                                                        \
        role.word = __entry->role;                                      \
                                                                        \
        trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
                         " %snxe %sad root %u %s%c",                    \
                         __entry->mmu_valid_gen,                        \
                         __entry->gfn, role.level,                      \
                         role.has_4_byte_gpte ? 4 : 8,                  \
                         role.quadrant,                                 \
                         role.direct ? " direct" : "",                  \
                         access_str[role.access],                       \
                         role.invalid ? " invalid" : "",                \
                         role.efer_nx ? "" : "!",                       \
                         role.ad_disabled ? "!" : "",                   \
                         __entry->root_count,                           \
                         __entry->unsync ? "unsync" : "sync", 0);       \
        saved_ptr;                                                      \
                })
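
/*
 * The three KVM_MMU_PAGE_* helpers above are shared by every event that
 * logs a struct kvm_mmu_page: FIELDS declares the record layout, ASSIGN
 * fills it, and PRINTK() decodes the packed role word at print time.
 * An illustrative (not verbatim) decoded line would look like:
 *
 *   sp gen 0 gfn c0000 l1 8-byte q0 wux nxe ad root 1 sync
 *
 * i.e. a level-1 shadow page with 8-byte guest PTEs, writable/user/
 * executable access, EFER.NX honored, A/D bits in use, one root
 * reference, and in-sync contents.
 */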

#define kvm_mmu_trace_pferr_flags       \
        { PFERR_PRESENT_MASK, "P" },    \
        { PFERR_WRITE_MASK, "W" },      \
        { PFERR_USER_MASK, "U" },       \
        { PFERR_RSVD_MASK, "RSVD" },    \
        { PFERR_FETCH_MASK, "F" }
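
/*
 * Fed to __print_flags() below: each set bit in the page-fault error
 * code is rendered by name, joined with "|".  For example, a write to
 * a present user page would print as "P|W|U" (illustrative output).
 */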

TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
TRACE_DEFINE_ENUM(RET_PF_WRITE_PROTECTED);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
TRACE_DEFINE_ENUM(RET_PF_FIXED);
TRACE_DEFINE_ENUM(RET_PF_SPURIOUS);
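
/*
 * TRACE_DEFINE_ENUM() exports the numeric values of the RET_PF_* page
 * fault return codes (from mmu_internal.h) to tracefs, so userspace
 * tools such as trace-cmd and perf can resolve the symbolic names used
 * in the events' print format strings.
 */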

/*
 * A pagetable walk has started
 */
TRACE_EVENT(
        kvm_mmu_pagetable_walk,
        TP_PROTO(u64 addr, u32 pferr),
        TP_ARGS(addr, pferr),

        TP_STRUCT__entry(
                __field(__u64, addr)
                __field(__u32, pferr)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->pferr = pferr;
        ),

        TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);
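
/*
 * Each TRACE_EVENT(name, ...) expands to a trace_<name>() helper that
 * callers invoke at the instrumented spot; for this event the guest
 * page-table walker does, roughly (illustrative call site):
 *
 *   trace_kvm_mmu_pagetable_walk(addr, access);
 *
 * The event can be enabled at runtime via tracefs, e.g.:
 *   echo 1 > /sys/kernel/tracing/events/kvmmmu/kvm_mmu_pagetable_walk/enable
 */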


/* We just walked a paging element */
TRACE_EVENT(
        kvm_mmu_paging_element,
        TP_PROTO(u64 pte, int level),
        TP_ARGS(pte, level),

        TP_STRUCT__entry(
                __field(__u64, pte)
                __field(__u32, level)
                ),

        TP_fast_assign(
                __entry->pte = pte;
                __entry->level = level;
                ),

        TP_printk("pte %llx level %u", __entry->pte, __entry->level)
);

DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size),

        TP_STRUCT__entry(
                __field(__u64, gpa)
        ),

        TP_fast_assign(
                __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
                                + index * size;
                ),

        TP_printk("gpa %llx", __entry->gpa)
);
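
/*
 * DECLARE_EVENT_CLASS() + DEFINE_EVENT() share one copy of the entry
 * layout, assignment, and print code among several events; each
 * DEFINE_EVENT() below only contributes a name, which keeps the
 * generated code small.  The class computes the guest-physical address
 * of the PTE being touched: gpa = (table_gfn << PAGE_SHIFT) + index * size.
 */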

/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

        TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

        TP_ARGS(table_gfn, index, size)
);

TRACE_EVENT(
        kvm_mmu_walker_error,
        TP_PROTO(u32 pferr),
        TP_ARGS(pferr),

        TP_STRUCT__entry(
                __field(__u32, pferr)
                ),

        TP_fast_assign(
                __entry->pferr = pferr;
                ),

        TP_printk("pferr %x %s", __entry->pferr,
                  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
);

TRACE_EVENT(
        kvm_mmu_get_page,
        TP_PROTO(struct kvm_mmu_page *sp, bool created),
        TP_ARGS(sp, created),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
                __field(bool, created)
                ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
                __entry->created = created;
                ),

        TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
                  __entry->created ? "new" : "existing")
);
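
/*
 * kvm_mmu_get_page fires when a shadow page is looked up by gfn/role:
 * "new" means a page was freshly allocated, "existing" means the lookup
 * hit an already-tracked page (a sketch of the intent; see the shadow
 * page lookup path, kvm_mmu_get_shadow_page() in recent kernels).
 */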

DECLARE_EVENT_CLASS(kvm_mmu_page_class,

        TP_PROTO(struct kvm_mmu_page *sp),
        TP_ARGS(sp),

        TP_STRUCT__entry(
                KVM_MMU_PAGE_FIELDS
        ),

        TP_fast_assign(
                KVM_MMU_PAGE_ASSIGN(sp)
        ),

        TP_printk("%s", KVM_MMU_PAGE_PRINTK())
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
        TP_PROTO(struct kvm_mmu_page *sp),

        TP_ARGS(sp)
);

TRACE_EVENT(
        mark_mmio_spte,
        TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
        TP_ARGS(sptep, gfn, spte),

        TP_STRUCT__entry(
                __field(void *, sptep)
                __field(gfn_t, gfn)
                __field(unsigned, access)
                __field(unsigned int, gen)
        ),

        TP_fast_assign(
                __entry->sptep = sptep;
                __entry->gfn = gfn;
                __entry->access = spte & ACC_ALL;
                __entry->gen = get_mmio_spte_generation(spte);
        ),

        TP_printk("sptep:%p gfn %llx access %x gen %x", __entry->sptep,
                  __entry->gfn, __entry->access, __entry->gen)
);
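
/*
 * MMIO sptes cache emulated-MMIO hits: rather than re-walking the
 * memslots on every access, KVM installs a magic non-present spte that
 * encodes the gfn and access bits.  The generation number stamped into
 * the spte (extracted here by get_mmio_spte_generation()) lets stale
 * entries be detected after the memslots change; check_mmio_spte below
 * logs that comparison.
 */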

TRACE_EVENT(
        handle_mmio_page_fault,
        TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
        TP_ARGS(addr, gfn, access),

        TP_STRUCT__entry(
                __field(u64, addr)
                __field(gfn_t, gfn)
                __field(unsigned, access)
        ),

        TP_fast_assign(
                __entry->addr = addr;
                __entry->gfn = gfn;
                __entry->access = access;
        ),

        TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
                  __entry->access)
);

TRACE_EVENT(
        fast_page_fault,
        TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
                 u64 *sptep, u64 old_spte, int ret),
        TP_ARGS(vcpu, fault, sptep, old_spte, ret),

        TP_STRUCT__entry(
                __field(int, vcpu_id)
                __field(gpa_t, cr2_or_gpa)
                __field(u64, error_code)
                __field(u64 *, sptep)
                __field(u64, old_spte)
                __field(u64, new_spte)
                __field(int, ret)
        ),

        TP_fast_assign(
                __entry->vcpu_id = vcpu->vcpu_id;
                __entry->cr2_or_gpa = fault->addr;
                __entry->error_code = fault->error_code;
                __entry->sptep = sptep;
                __entry->old_spte = old_spte;
                __entry->new_spte = *sptep;
                __entry->ret = ret;
        ),

        TP_printk("vcpu %d gva %llx error_code %s sptep %p old %#llx"
                  " new %llx spurious %d fixed %d", __entry->vcpu_id,
                  __entry->cr2_or_gpa, __print_flags(__entry->error_code, "|",
                  kvm_mmu_trace_pferr_flags), __entry->sptep,
                  __entry->old_spte, __entry->new_spte,
                  __entry->ret == RET_PF_SPURIOUS, __entry->ret == RET_PF_FIXED
        )
);
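
/*
 * The fast page-fault path repairs certain faults (e.g. a write to a
 * page that was write-protected for dirty logging) with an atomic spte
 * update instead of taking mmu_lock.  In the output, "spurious 1" means
 * another vCPU had already fixed the fault, while "fixed 1" means this
 * attempt installed new_spte.  (A summary of the handler's contract as
 * reflected in the ret comparisons above, not taken verbatim from it.)
 */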

TRACE_EVENT(
        kvm_mmu_zap_all_fast,
        TP_PROTO(struct kvm *kvm),
        TP_ARGS(kvm),

        TP_STRUCT__entry(
                __field(__u8, mmu_valid_gen)
                __field(unsigned int, mmu_used_pages)
        ),

        TP_fast_assign(
                __entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
                __entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
        ),

        TP_printk("kvm-mmu-valid-gen %u used_pages %x",
                  __entry->mmu_valid_gen, __entry->mmu_used_pages
        )
);
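
/*
 * kvm_mmu_zap_all_fast invalidates every shadow page by bumping
 * kvm->arch.mmu_valid_gen; pages carrying a stale generation are then
 * reaped lazily, which is why the event records the new generation
 * together with the current count of in-use shadow pages.
 */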


TRACE_EVENT(
        check_mmio_spte,
        TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
        TP_ARGS(spte, kvm_gen, spte_gen),

        TP_STRUCT__entry(
                __field(unsigned int, kvm_gen)
                __field(unsigned int, spte_gen)
                __field(u64, spte)
        ),

        TP_fast_assign(
                __entry->kvm_gen = kvm_gen;
                __entry->spte_gen = spte_gen;
                __entry->spte = spte;
        ),

        TP_printk("spte %llx kvm_gen %x spte-gen %x valid %d", __entry->spte,
                  __entry->kvm_gen, __entry->spte_gen,
                  __entry->kvm_gen == __entry->spte_gen
        )
);
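
/*
 * A cached MMIO spte is usable only while its embedded generation
 * matches KVM's current memslot generation: "valid 1" in the output
 * means the cached entry was honored, "valid 0" means it was stale and
 * had to be re-resolved.
 */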

TRACE_EVENT(
        kvm_mmu_set_spte,
        TP_PROTO(int level, gfn_t gfn, u64 *sptep),
        TP_ARGS(level, gfn, sptep),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, spte)
                __field(u64, sptep)
                __field(u8, level)
                /* These depend on page entry type, so compute them now.  */
                __field(bool, r)
                __field(bool, x)
                __field(signed char, u)
        ),

        TP_fast_assign(
                __entry->gfn = gfn;
                __entry->spte = *sptep;
                __entry->sptep = virt_to_phys(sptep);
                __entry->level = level;
                __entry->r = shadow_present_mask || (__entry->spte & PT_PRESENT_MASK);
                __entry->x = is_executable_pte(__entry->spte);
                __entry->u = shadow_user_mask ? !!(__entry->spte & shadow_user_mask) : -1;
        ),

        TP_printk("gfn %llx spte %llx (%s%s%s%s) level %d at %llx",
                  __entry->gfn, __entry->spte,
                  __entry->r ? "r" : "-",
                  __entry->spte & PT_WRITABLE_MASK ? "w" : "-",
                  __entry->x ? "x" : "-",
                  __entry->u == -1 ? "" : (__entry->u ? "u" : "-"),
                  __entry->level, __entry->sptep
        )
);
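
/*
 * The r/x/u decode depends on the spte format.  When shadow_present_mask
 * is nonzero a logged spte is necessarily readable, so "r" is always
 * shown; only when execute-only mappings are possible (the mask is zero)
 * is bit 0, which doubles as EPT's read permission, checked directly.
 * "u" is set to -1, and prints as empty, when the format has no
 * user/supervisor bit (shadow_user_mask == 0).  (One reading of the
 * assignments above, not a specification.)
 */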

TRACE_EVENT(
        kvm_mmu_spte_requested,
        TP_PROTO(struct kvm_page_fault *fault),
        TP_ARGS(fault),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, pfn)
                __field(u8, level)
        ),

        TP_fast_assign(
                __entry->gfn = fault->gfn;
                __entry->pfn = fault->pfn | (fault->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
                __entry->level = fault->goal_level;
        ),

        TP_printk("gfn %llx pfn %llx level %d",
                  __entry->gfn, __entry->pfn, __entry->level
        )
);
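
/*
 * fault->pfn is aligned down to the head of the (possibly huge) backing
 * page by the hugepage adjustment, so the low bits of gfn are OR'ed
 * back in here to report the pfn of the exact 4KiB frame the guest
 * touched.
 */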

TRACE_EVENT(
        kvm_tdp_mmu_spte_changed,
        TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
        TP_ARGS(as_id, gfn, level, old_spte, new_spte),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, old_spte)
                __field(u64, new_spte)
                /* Level cannot be larger than 5 on x86, so it fits in a u8. */
                __field(u8, level)
                /* as_id can only be 0 or 1 on x86, so it fits in a u8. */
                __field(u8, as_id)
        ),

        TP_fast_assign(
                __entry->gfn = gfn;
                __entry->old_spte = old_spte;
                __entry->new_spte = new_spte;
                __entry->level = level;
                __entry->as_id = as_id;
        ),

        TP_printk("as id %d gfn %llx level %d old_spte %llx new_spte %llx",
                  __entry->as_id, __entry->gfn, __entry->level,
                  __entry->old_spte, __entry->new_spte
        )
);
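
/*
 * as_id is the memslot address-space id: 0 for the normal address space
 * and 1 for SMM.  The TDP MMU emits this event from its central
 * spte-mutation path, so every change to a TDP page-table entry is
 * observable through it.
 */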

TRACE_EVENT(
        kvm_mmu_split_huge_page,
        TP_PROTO(u64 gfn, u64 spte, int level, int errno),
        TP_ARGS(gfn, spte, level, errno),

        TP_STRUCT__entry(
                __field(u64, gfn)
                __field(u64, spte)
                __field(int, level)
                __field(int, errno)
        ),

        TP_fast_assign(
                __entry->gfn = gfn;
                __entry->spte = spte;
                __entry->level = level;
                __entry->errno = errno;
        ),

        TP_printk("gfn %llx spte %llx level %d errno %d",
                  __entry->gfn, __entry->spte, __entry->level, __entry->errno)
);
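
/*
 * Reported when eager page splitting breaks a huge mapping into smaller
 * ones (e.g. while dirty logging is enabled); a nonzero errno, such as
 * -ENOMEM, records a failed split attempt.
 */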

#endif /* _TRACE_KVMMMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH mmu
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace
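
/*
 * TRACE_INCLUDE_PATH/FILE tell define_trace.h where to re-read this
 * header.  Exactly one .c file per subsystem instantiates the events by
 * defining CREATE_TRACE_POINTS before including it, along these lines
 * (illustrative pattern):
 *
 *   #define CREATE_TRACE_POINTS
 *   #include "mmutrace.h"
 *
 * All other includers get only the trace_*() declarations, which is why
 * the guard at the top tolerates TRACE_HEADER_MULTI_READ.
 */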

/* This part must be outside protection */
#include <trace/define_trace.h>