/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)			\
	__LOCKF(USED_IN_##__STATE)		\
	__LOCKF(USED_IN_##__STATE##_READ)	\
	__LOCKF(ENABLED_##__STATE)		\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
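
/*
 * Example of the x-macro expansion above (a sketch; the exact state
 * list lives in lockdep_states.h, but it must include HARDIRQ given
 * the LOCKF_*_HARDIRQ masks below). LOCKDEP_STATE(HARDIRQ) adds to
 * the first enum:
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *
 * and to the second enum the matching one-bit masks, e.g.:
 *
 *	LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),
 */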

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL

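/*
 * Each tracked dependency consumes one entry from a static pool sized
 * by MAX_LOCKDEP_ENTRIES (its occupancy is nr_list_entries, declared
 * below); once the pool is exhausted, lockdep warns and turns itself
 * off rather than validate against an incomplete dependency graph.
 */
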
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)

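/*
 * The factor of 5 in MAX_LOCKDEP_CHAIN_HLOCKS is a sizing heuristic:
 * it assumes an average of roughly five held locks per recorded lock
 * chain; it is not a hard limit on any individual chain's depth.
 */
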
/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	262144UL

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
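
/*
 * Sizing note (per get_usage_chars() in lockdep.c): every state from
 * lockdep_states.h contributes four usage bits but is printed as two
 * characters (one for the write variant, one for read), so
 * LOCK_USAGE_STATES/2 characters cover all states and the extra 1
 * holds the terminating '\0'.
 */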

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
extern unsigned long nr_lock_chains;
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

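/*
 * max_bfs_queue_depth records the deepest that the queue used by the
 * breadth-first search of the dependency graph has grown; it is
 * exported via the lockdep proc interface as a sizing diagnostic.
 */
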
#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif
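
/*
 * Without CONFIG_PROVE_LOCKING the dependency graph is not built, so
 * the stubs above simply report zero dependencies; this lets callers
 * such as the lockdep proc interface compile without #ifdefs.
 */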

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu as they are often accessed in the fast path,
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

#define __debug_atomic_inc(ptr)				\
	this_cpu_inc(lockdep_stats.ptr)

#define debug_atomic_inc(ptr)			do {	\
	WARN_ON_ONCE(!irqs_disabled());			\
	__this_cpu_inc(lockdep_stats.ptr);		\
} while (0)

#define debug_atomic_dec(ptr)			do {	\
	WARN_ON_ONCE(!irqs_disabled());			\
	__this_cpu_dec(lockdep_stats.ptr);		\
} while (0)

#define debug_atomic_read(ptr) ({					\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
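
/*
 * Usage sketch (callers live in lockdep.c and lockdep_proc.c): the
 * argument is the bare struct lockdep_stats member name, e.g.
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *
 * while debug_atomic_read(chain_lookup_hits) sums that counter across
 * all possible CPUs for reporting.
 */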
#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)	0
#endif