/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

#define MAX_LOCKDEP_SUBCLASSES	8UL
enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX,		/* must be last */
};
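
/*
 * Illustrative sketch (the lock variables are hypothetical): the wait types
 * above form an ordering; while a lock is held, only locks whose inner wait
 * type is not larger than the held lock's may be acquired. With
 * CONFIG_PROVE_RAW_LOCK_NESTING the following is flagged even on !PREEMPT_RT,
 * because spinlock_t (LD_WAIT_CONFIG) sleeps on PREEMPT_RT while
 * raw_spinlock_t (LD_WAIT_SPIN) never does:
 *
 *	raw_spin_lock(&raw_lock);
 *	spin_lock(&lock);	// invalid: LD_WAIT_CONFIG inside LD_WAIT_SPIN
 *	spin_unlock(&lock);
 *	raw_spin_unlock(&raw_lock);
 */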
enum lockdep_lock_type {
	LD_LOCK_NORMAL = 0,	/* normal, catch all */
	LD_LOCK_PERCPU,		/* percpu */
	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
};
#ifdef CONFIG_LOCKDEP

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
 * of those we generate 4 states. Additionally we report on USED and USED_READ,
 * so the two states (HARDIRQ, SOFTIRQ) yield 2*4 + 2 = 10 trace slots.
 */
#define XXX_LOCK_USAGE_STATES	2
#define LOCK_TRACE_STATES	(XXX_LOCK_USAGE_STATES*4 + 2)
/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires the highly contended rq->lock with a
 * single-depth subclass.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2
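
/*
 * Illustrative sketch (rq1/rq2 are hypothetical instances of the same lock
 * class): a same-class nested acquisition is annotated with an explicit
 * subclass, and the subclass-1 class pointer is then served from
 * class_cache[1] instead of the hash lookup:
 *
 *	spin_lock(&rq1->lock);
 *	spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 */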
/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};
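
/*
 * Illustrative sketch (struct foo and its helpers are hypothetical;
 * lockdep_register_key(), lockdep_unregister_key() and lockdep_set_class()
 * are declared in <linux/lockdep.h>, not here): a dynamically allocated
 * object with a dynamically allocated key must pair registration with
 * unregistration before the key memory is freed:
 *
 *	struct foo {
 *		spinlock_t		lock;
 *		struct lock_class_key	key;
 *	};
 *
 *	struct foo *foo_create(void)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (!f)
 *			return NULL;
 *		lockdep_register_key(&f->key);
 *		spin_lock_init(&f->lock);
 *		lockdep_set_class(&f->lock, &f->key);
 *		return f;
 *	}
 *
 *	void foo_destroy(struct foo *f)
 *	{
 *		lockdep_unregister_key(&f->key);
 *		kfree(f);
 *	}
 */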
extern struct lock_class_key __lockdep_no_validate__;

struct lock_trace;

#define LOCKSTAT_POINTS		4

struct lockdep_map;
typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
			   const struct lockdep_map *b);
typedef void (*lock_print_fn)(const struct lockdep_map *map);
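
/*
 * Illustrative sketch (struct foo and the ordering rule are hypothetical;
 * the hooks are installed with the lock_set_cmp_fn() helper declared in
 * <linux/lockdep.h>): a cmp_fn imposes an order between two held locks of
 * the same class, so lockdep can accept same-class nesting that always
 * follows that order; print_fn lets the class print its own state in a
 * report:
 *
 *	static int foo_lock_cmp_fn(const struct lockdep_map *a,
 *				   const struct lockdep_map *b)
 *	{
 *		// order same-class locks by map address (stable but arbitrary)
 *		return a < b ? -1 : (a > b ? 1 : 0);
 *	}
 *
 *	lock_set_cmp_fn(&foo->lock, foo_lock_cmp_fn, NULL);
 */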
/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;
	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;
	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;
	const struct lockdep_subclass_key *key;
	lock_cmp_fn			cmp_fn;
	lock_print_fn			print_fn;

	unsigned int			subclass;
	unsigned int			dep_gen_id;
	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];
	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	int				name_version;
#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;
#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};
struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
	u8				wait_type_outer; /* can be taken in this context */
	u8				wait_type_inner; /* represents this context */
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
struct pin_cookie { unsigned int val; };

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;					/* 32 bits */
	unsigned int pin_count;
};
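
/*
 * Illustrative sketch of how the chain key evolves (simplified; the real
 * code is iterate_chain_key() in kernel/locking/lockdep.c, and hash_mix()
 * below stands in for its jhash-based mixing): every acquisition folds an
 * identifier of the new held lock into the previous 64-bit key, so each
 * unique acquisition path hashes to a (very likely) unique chain key:
 *
 *	u64 chain_key = INITIAL_CHAIN_KEY;
 *	for each held lock, in acquisition order:
 *		chain_key = hash_mix(chain_key, hlock_id(hlock));
 */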
#else /* !CONFIG_LOCKDEP */

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

struct pin_cookie { };

#endif /* !CONFIG_LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */