/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

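/*
 * Initialize a raw spinlock for debugging: set up the lockdep map (when
 * CONFIG_DEBUG_LOCK_ALLOC is enabled), stamp the magic value and mark the
 * lock as currently unowned.
 */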
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

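/*
 * Print a full report for a misbehaving spinlock: the triggering message,
 * the CPU and task that tripped the check, the lock's magic/owner fields
 * and a stack trace.
 */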
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu);
	dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

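/*
 * SPIN_BUG_ON(cond, lock, msg) reports a violated invariant: if @cond is
 * true it turns off further lock debugging via debug_locks_off() and dumps
 * the lock state, but it deliberately does not stop the kernel.
 */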
#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

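/*
 * Sanity checks around lock/unlock: the magic must be intact, the current
 * task/CPU must not already hold the lock when acquiring it, and only the
 * recorded owner, on the recorded CPU, may release it.
 */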
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
						lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
						lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

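/*
 * Slow path for a contended debug spinlock: spin with trylock for roughly
 * one second (loops_per_jiffy * HZ iterations of __delay(1)); if the lock
 * still cannot be taken, report a suspected lockup, trigger backtraces on
 * all CPUs and fall back to a plain arch_spin_lock().
 */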
static void __spin_lock_debug(raw_spinlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;

	for (i = 0; i < loops; i++) {
		if (arch_spin_trylock(&lock->raw_lock))
			return;
		__delay(1);
	}
	/* lockup suspected: */
	spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
	trigger_all_cpu_backtrace();
#endif

	/*
	 * The trylock above was causing a livelock. Give the lower level arch
	 * specific lock code a chance to acquire the lock. We have already
	 * printed a warning/backtrace at this point. The non-debug arch
	 * specific code might actually succeed in acquiring the lock. If it is
	 * not successful, the end-result is the same - there is no forward
	 * progress.
	 */
	arch_spin_lock(&lock->raw_lock);
}

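/*
 * do_raw_spin_lock/trylock/unlock() are the out-of-line lock operations the
 * generic spinlock wrappers call when CONFIG_DEBUG_SPINLOCK is enabled; each
 * wraps the arch primitive with the ownership checks above.
 */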
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
		__spin_lock_debug(lock);
	debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret)
		debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}

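/*
 * rwlock counterpart of spin_bug(): report the offending rwlock, CPU and
 * task, then dump a stack trace.
 */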
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

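/*
 * The timed read-lock acquisition loop below is compiled out: the analogous
 * __write_lock_debug() can itself lock up, and this variant is suspected of
 * the same problem (see the #if 0 block further down).
 */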
#if 0	/* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_read_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

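/*
 * Read-side operations only verify the lock's magic value: a rwlock may be
 * held by several readers at once, so there is no single owner to track.
 */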
void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

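/*
 * The write side is exclusive, so it gets the same owner/CPU bookkeeping and
 * recursion checks as the spinlock code above.
 */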
static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
						lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
						lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

#if 0	/* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
	u64 i;
	u64 loops = loops_per_jiffy * HZ;
	int print_once = 1;

	for (;;) {
		for (i = 0; i < loops; i++) {
			if (arch_write_trylock(&lock->raw_lock))
				return;
			__delay(1);
		}
		/* lockup suspected: */
		if (print_once) {
			print_once = 0;
			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
					"%s/%d, %p\n",
				raw_smp_processor_id(), current->comm,
				current->pid, lock);
			dump_stack();
		}
	}
}
#endif

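/*
 * Write-lock entry points: check the invariants, take the arch lock
 * (unconditionally, since the timed debug loop above is disabled) and
 * record the new owner.
 */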
void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}