1 // SPDX-License-Identifier: GPL-2.0
2 #ifndef PERF_LOCK_CONTENTION_H
3 #define PERF_LOCK_CONTENTION_H
5 #include <linux/list.h>
6 #include <linux/rbtree.h>
/* NOTE(review): interior of struct lock_stat — the struct's opening and closing braces are outside this chunk */
struct hlist_node hash_entry; /* presumably links this entry into a lockhash_table bucket — confirm */
struct rb_node rb; /* used for sorting */

u64 addr; /* address of lockdep_map, used as ID */
char *name; /* for strcpy(), we cannot use const */

/* per-event-type counters (event kinds: see SEQ_STATE_* states below) */
unsigned int nr_acquire;
unsigned int nr_acquired;
unsigned int nr_contended;
unsigned int nr_release;

unsigned int nr_readlock; /* acquires in read mode (cf. sequence pattern 3 below) */
unsigned int nr_trylock; /* acquires via trylock (cf. sequence pattern 3 below) */

/* these times are in nano sec. */

int broken; /* flag of blacklist */
/*
 * States of lock_seq_stat
 *
 * UNINITIALIZED is required for detecting the first event of acquire.
 * By the nature of lock events, there is no guarantee
 * that the first event for a lock is acquire;
 * it can be acquired, contended or release.
 */
#define SEQ_STATE_UNINITIALIZED 0 /* initial state */
#define SEQ_STATE_RELEASED 1 /* last event was release */
#define SEQ_STATE_ACQUIRING 2 /* acquire requested, not yet granted */
#define SEQ_STATE_ACQUIRED 3 /* lock is held */
#define SEQ_STATE_READ_ACQUIRED 4 /* held via a read-mode acquire */
#define SEQ_STATE_CONTENDED 5 /* acquire hit contention */
/*
 * Imported from include/linux/sched.h.
 * Should this be synchronized?
 */
#define MAX_LOCK_DEPTH 48
/* based on kernel/lockdep.c */
#define LOCKHASH_BITS 12
#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS) /* number of hash buckets */

/*
 * Global hash table of lock_stat entries; presumably keyed by
 * lock_stat::addr ("used as ID") — confirm at the definition site.
 */
extern struct hlist_head *lockhash_table;
/*
 * struct lock_seq_stat:
 * Place to keep the state of one lock sequence
 * 1) acquire -> acquired -> release
 * 2) acquire -> contended -> acquired -> release
 * 3) acquire (with read or try) -> release
 * 4) Are there other patterns?
 */
86 struct lock_seq_stat {
87 struct list_head list;
99 struct list_head seq_list;
/*
 * CONTENTION_STACK_DEPTH
 * Number of stack trace entries to find callers
 */
#define CONTENTION_STACK_DEPTH 8

/*
 * CONTENTION_STACK_SKIP
 * Number of stack trace entries to skip when finding callers.
 * The first few entries belong to the locking implementation itself.
 */
#define CONTENTION_STACK_SKIP 4
/*
 * flags for lock:contention_begin
 * Imported from include/trace/events/lock.h.
 * Flag meanings below follow that header; verify on kernel updates.
 */
#define LCB_F_SPIN (1U << 0) /* spinning acquisition (spinlock family) */
#define LCB_F_READ (1U << 1) /* read/shared mode */
#define LCB_F_WRITE (1U << 2) /* write/exclusive mode */
#define LCB_F_RT (1U << 3) /* real-time variant */
#define LCB_F_PERCPU (1U << 4) /* per-cpu lock */
#define LCB_F_MUTEX (1U << 5) /* mutex */
130 struct lock_contention_fails {
/*
 * Collection context handed to lock_contention_prepare/read/finish().
 * NOTE(review): this definition continues past the visible chunk;
 * more members follow map_nr_entries.
 */
struct lock_contention {
struct evlist *evlist;
struct target *target; /* task/CPU targeting */
struct machine *machine; /* symbol/map resolution context — presumably; confirm */
struct hlist_head *result; /* NOTE(review): looks like hash buckets of result lock_stat — confirm */
struct lock_filter *filters;
struct lock_contention_fails fails; /* failure counters (struct declared above) */
struct rb_root cgroups;
unsigned long map_nr_entries; /* NOTE(review): likely BPF map capacity — confirm against prepare() */
/* perf option callback for a callstack-related option — confirm which option at the caller */
int parse_call_stack(const struct option *opt, const char *str, int unset);
/* whether the current configuration requires collecting callstacks */
bool needs_callstack(void);

/* look up the lock_stat identified by addr; presumably NULL when absent — confirm */
struct lock_stat *lock_stat_find(u64 addr);
/* like lock_stat_find() but creates a new entry when none exists — verify at definition */
struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags);

/* test a callstack (up to max_stack_depth entries) against the configured callstack filters */
bool match_callstack_filter(struct machine *machine, u64 *callstack, int max_stack_depth);

/*
 * BPF-backed collection lifecycle (HAVE_BPF_SKEL build);
 * the !HAVE_BPF_SKEL stubs further down are no-ops returning 0.
 */
int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
int lock_contention_finish(struct lock_contention *con);
171 #else /* !HAVE_BPF_SKEL */
173 static inline int lock_contention_prepare(struct lock_contention *con __maybe_unused)
/* Stub for builds without BPF skeleton support: nothing to start, report success. */
static inline int lock_contention_start(void)
{
	return 0;
}
/* Stub for builds without BPF skeleton support: nothing to stop, report success. */
static inline int lock_contention_stop(void)
{
	return 0;
}
180 static inline int lock_contention_finish(struct lock_contention *con __maybe_unused)
185 static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
190 #endif /* HAVE_BPF_SKEL */
192 #endif /* PERF_LOCK_CONTENTION_H */