/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/module.h>

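/*
 * Note: when CONFIG_DEBUG_SPINLOCK is enabled, the generic spinlock and
 * rwlock wrappers are expected to call the _raw_spin_*(), _raw_read_*()
 * and _raw_write_*() functions defined in this file (rather than mapping
 * straight to the architecture __raw_*() primitives), so lock and unlock
 * operations pass through the magic/ownership sanity checks below.
 */
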
void __spin_lock_init(spinlock_t *lock, const char *name,
                      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
        lock->magic = RWLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

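/*
 * spin_bug() reports a spinlock consistency failure: it disables further
 * lock debugging via debug_locks_off() (so only the first failure is
 * reported), then dumps the offending lock's magic value, owner task and
 * owner CPU together with a stack trace.
 */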
static void spin_bug(spinlock_t *lock, const char *msg)
{
        struct task_struct *owner = NULL;

        if (!debug_locks_off())
                return;

        if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
                owner = lock->owner;
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                msg, raw_smp_processor_id(),
                current->comm, task_pid_nr(current));
        printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
                        ".owner_cpu: %d\n",
                lock, lock->magic,
                owner ? owner->comm : "<none>",
                owner ? task_pid_nr(owner) : -1,
                lock->owner_cpu);
        dump_stack();
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(lock->owner == current, lock, "recursion");
        SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_spin_lock_after(spinlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_spin_unlock(spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

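/*
 * Slow path used when the lock is contended: keep polling with trylock for
 * loops_per_jiffy * HZ iterations of __delay(1) (very roughly one second of
 * calibrated delay) per pass; if the lock still cannot be taken, print a
 * one-time "spinlock lockup" report with a stack trace (and, on SMP,
 * backtraces of all CPUs) and then keep spinning.
 */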
static void __spin_lock_debug(spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (__raw_spin_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                task_pid_nr(current), lock);
                        dump_stack();
#ifdef CONFIG_SMP
                        trigger_all_cpu_backtrace();
#endif
                }
        }
}

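/*
 * The _raw_spin_*() entry points below wrap the architecture lock
 * primitives with the before/after checks above: _raw_spin_lock() tries
 * the fast path once and falls back to the watchdog loop on contention,
 * and _raw_spin_unlock() verifies ownership before releasing the lock.
 */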
void _raw_spin_lock(spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
        debug_spin_lock_after(lock);
}

int _raw_spin_trylock(spinlock_t *lock)
{
        int ret = __raw_spin_trylock(&lock->raw_lock);

        if (ret)
                debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_spin_unlock(spinlock_t *lock)
{
        debug_spin_unlock(lock);
        __raw_spin_unlock(&lock->raw_lock);
}

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
                msg, raw_smp_processor_id(), current->comm,
                task_pid_nr(current), lock);
        dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0   /* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (__raw_read_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

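/*
 * Read locks can be held by multiple CPUs at once, so there is no single
 * owner to track; the read-side entry points below only check the lock's
 * magic value and defer to the architecture implementation.
 */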
void _raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        __raw_read_lock(&lock->raw_lock);
}

int _raw_read_trylock(rwlock_t *lock)
{
        int ret = __raw_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        __raw_read_unlock(&lock->raw_lock);
}

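/*
 * Write locks are exclusive, so (unlike the read side) the owner task and
 * owner CPU can be recorded and checked, mirroring the spinlock helpers.
 */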
static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

#if 0   /* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (__raw_write_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

void _raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        __raw_write_lock(&lock->raw_lock);
        debug_write_lock_after(lock);
}

int _raw_write_trylock(rwlock_t *lock)
{
        int ret = __raw_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        __raw_write_unlock(&lock->raw_lock);
}