/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#include <linux/lockdep_types.h>
#include <linux/smp.h>
#include <asm/percpu.h>
struct task_struct;

extern int prove_locking;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>
static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct lock_class		*links_to;
	const struct lock_trace		*trace;

	/* bitmap of different dependencies from head to this */
	u8				dep;
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	u8				only_xr;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
/**
 * struct lock_chain - lock dependency chain record
 *
 * @irq_context: the same as irq_context in held_lock below
 * @depth:       the number of held locks in this chain
 * @base:        the index in chain_hlocks for this chain
 * @entry:       the collided lock chains in lock_chain hash list
 * @chain_key:   the hash key of this lock_chain
 */
struct lock_chain {
	/* see BUILD_BUG_ON()s in add_chain_cache() */
	unsigned int			irq_context :  2,
					depth       :  6,
					base        : 24;
	struct hlist_node		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
	unsigned int pin_count;
};
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);
extern void lockdep_set_selftest_task(struct task_struct *task);

extern void lockdep_init_task(struct task_struct *task);
/*
 * Split the recursion counter in two to readily detect 'off' vs recursion.
 */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/*
 * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due
 * to header dependencies.
 */
#define lockdep_off()						\
do {								\
	current->lockdep_recursion += LOCKDEP_OFF;		\
} while (0)

#define lockdep_on()						\
do {								\
	current->lockdep_recursion -= LOCKDEP_OFF;		\
} while (0)
extern void lockdep_register_key(struct lock_class_key *key);
extern void lockdep_unregister_key(struct lock_class_key *key);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to the validator:
 */
extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, short inner, short outer);

static inline void
lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass, short inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_waits(&(lock)->dep_map, name, key, 0,	\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
			       (lock)->dep_map.wait_type_inner,	\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
			       (lock)->dep_map.wait_type_inner,		\
			       (lock)->dep_map.wait_type_outer)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
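/*
 * For example (illustrative sketch, the "foo" names are hypothetical): an
 * object whose spinlock is initialized by common code but follows different
 * locking rules in one subsystem can be moved into its own class so that
 * the validator does not merge the two usage patterns:
 *
 *	static struct lock_class_key foo_lock_key;
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		lockdep_set_class(&f->lock, &foo_lock_key);
 *	}
 */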
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
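/*
 * A minimal sketch of how a locking primitive could feed these events to
 * the validator ("foo_lock" and its helpers are hypothetical, not a kernel
 * API). The acquire annotation is issued before blocking on the lock, so a
 * deadlock is reported rather than silently hanging:
 *
 *	static void foo_lock(struct foo_lock *l)
 *	{
 *		lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *		__foo_lock_slowpath(l);
 *	}
 *
 *	static void foo_unlock(struct foo_lock *l)
 *	{
 *		lock_release(&l->dep_map, _RET_IP_);
 *		__foo_unlock(l);
 *	}
 */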
/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
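/*
 * lockdep_is_held() is typically used as the condition of other checking
 * constructs. For example (sketch, "foo" names are made up), a pointer
 * documented as protected by a mutex can be dereferenced with:
 *
 *	p = rcu_dereference_protected(foo->ptr, lockdep_is_held(&foo->mutex));
 */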
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);

#define NIL_COOKIE (struct pin_cookie){ .val = 0U, }

extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l)	do {					\
		WARN_ON(debug_locks && !lockdep_is_held(l));		\
	} while (0)

#define lockdep_assert_held_write(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
	} while (0)

#define lockdep_assert_held_read(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
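/*
 * A typical use of the assertions above (sketch, hypothetical names): a
 * helper documents and enforces its locking contract at run time whenever
 * lockdep is enabled, at no cost otherwise:
 *
 *	static void foo_update_state(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->state++;
 *	}
 */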
#else /* !CONFIG_LOCKDEP */

static inline void lockdep_init_task(struct task_struct *task)
{
}

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

static inline void lockdep_set_selftest_task(struct task_struct *task)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, i)			do { } while (0)
# define lock_downgrade(l, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit() 			do { } while (0)

static inline void lockdep_register_key(struct lock_class_key *key)
{
}

static inline void lockdep_unregister_key(struct lock_class_key *key)
{
}

#define lockdep_depth(tsk)	(0)

#define lockdep_is_held_type(l, r)		(1)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)
#define lockdep_assert_held_write(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_read(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)		do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#define NIL_COOKIE (struct pin_cookie){ }

#define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)

#endif /* !LOCKDEP */
enum xhlock_context_t {
	XHLOCK_HARD,
	XHLOCK_SOFT,
	XHLOCK_CTX_NR,
};

#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
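/*
 * Example (sketch, the map name is hypothetical): a statically declared map
 * used to annotate something that behaves like a lock but is not one, with
 * the map itself doubling as the class key:
 *
 *	static struct lockdep_map foo_flush_map =
 *		STATIC_LOCKDEP_MAP_INIT("foo_flush", &foo_flush_map);
 */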
static inline void lockdep_invariant_state(bool force) {}
static inline void lockdep_free_task(struct task_struct *task) {}
#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
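/*
 * LOCK_CONTENDED() is intended for lock implementations that have a trylock
 * fast path and a blocking slow path; contention is only recorded when the
 * fast path fails. A sketch of how a mutex-like primitive could use it
 * (the foo_lock type and __foo_* helpers are hypothetical):
 *
 *	void foo_lock(struct foo_lock *l)
 *	{
 *		mutex_acquire(&l->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(l, __foo_trylock, __foo_lock_slowpath);
 *	}
 */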
#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */
#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/* Variable used to make lockdep treat read_lock() as recursive in selftests */
#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
extern unsigned int force_read_lock_recursive;
#else /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */
#define force_read_lock_recursive 0
#endif /* CONFIG_DEBUG_LOCKING_API_SELFTESTS */

#ifdef CONFIG_LOCKDEP
extern bool read_lock_is_recursive(void);
#else /* CONFIG_LOCKDEP */
/* If !LOCKDEP, the value is meaningless */
#define read_lock_is_recursive() 0
#endif /* CONFIG_LOCKDEP */
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
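/*
 * Example (sketch): taking two locks of the same class in a well-defined
 * order, e.g. a parent object's lock followed by a child's, without
 * producing a false self-deadlock report:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */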
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, i)			lock_release(l, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)					\
do {									\
	if (read_lock_is_recursive())					\
		lock_acquire_shared_recursive(l, s, t, NULL, i);	\
	else								\
		lock_acquire_shared(l, s, t, NULL, i);			\
} while (0)

#define rwlock_release(l, i)			lock_release(l, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, i)			lock_release(l, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, i)			lock_release(l, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, i)			lock_release(l, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, _THIS_IP_)
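/*
 * The lock_map_*() helpers annotate pseudo-locks: code regions that must be
 * ordered against real locks for deadlock purposes (waiting for a worker or
 * a flush to finish, for instance) but are not backed by a lock. Sketch,
 * using a hypothetical map declared with STATIC_LOCKDEP_MAP_INIT() above:
 *
 *	lock_map_acquire(&foo_flush_map);
 *	// wait for, or run, the flush work here
 *	lock_map_release(&foo_flush_map);
 *
 * If the work ever takes a lock that is also held around the wait, lockdep
 * can now report the inversion.
 */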
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,	\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
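/*
 * might_lock*() documents "this function may take this lock", so the
 * dependency is recorded even on the paths that do not take it. Sketch with
 * hypothetical names:
 *
 *	void foo_poll(struct foo *f)
 *	{
 *		might_lock(&f->mutex);
 *		if (atomic_read(&f->dirty))
 *			foo_sync(f);		// takes f->mutex
 *	}
 */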
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
DECLARE_PER_CPU(unsigned int, lockdep_recursion);

#define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))

#define lockdep_assert_irqs_enabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_irqs_disabled()					\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)

#define lockdep_assert_in_irq()						\
do {									\
	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)

#define lockdep_assert_preemption_enabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() != 0		||		\
		      !this_cpu_read(hardirqs_enabled)));		\
} while (0)

#define lockdep_assert_preemption_disabled()				\
do {									\
	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
		     __lockdep_enabled			&&		\
		     (preempt_count() == 0		&&		\
		      this_cpu_read(hardirqs_enabled)));		\
} while (0)
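/*
 * Example (sketch, hypothetical names): a helper that relies on its caller
 * having disabled interrupts can state that requirement explicitly:
 *
 *	static void foo_advance_locked(struct foo *f)
 *	{
 *		lockdep_assert_irqs_disabled();
 *		f->head = f->head->next;
 *	}
 */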
#else /* !CONFIG_PROVE_LOCKING */

# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
# define might_lock_nested(lock, subclass) do { } while (0)

# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)

# define lockdep_assert_preemption_enabled() do { } while (0)
# define lockdep_assert_preemption_disabled() do { } while (0)

#endif /* CONFIG_PROVE_LOCKING */
#ifdef CONFIG_PROVE_RAW_LOCK_NESTING

# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)

#else

# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)

#endif
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s) { }
#endif

#endif /* __LINUX_LOCKDEP_H */