/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <[email protected]>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

/*
 * Generic declaration of the raw read_trylock() function,
 * architectures are supposed to optimize this:
 *
 * NOTE(review): this generic fallback is not a true trylock -- it
 * acquires the read lock unconditionally (spinning if contended) and
 * then reports success.  Architectures are expected to override it
 * with a genuinely non-blocking implementation.
 */
int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
{
	__raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic__raw_read_trylock);
30 | int __lockfunc _spin_trylock(spinlock_t *lock) | |
31 | { | |
32 | preempt_disable(); | |
33 | if (_raw_spin_trylock(lock)) | |
34 | return 1; | |
35 | ||
36 | preempt_enable(); | |
37 | return 0; | |
38 | } | |
39 | EXPORT_SYMBOL(_spin_trylock); | |
40 | ||
41 | int __lockfunc _read_trylock(rwlock_t *lock) | |
42 | { | |
43 | preempt_disable(); | |
44 | if (_raw_read_trylock(lock)) | |
45 | return 1; | |
46 | ||
47 | preempt_enable(); | |
48 | return 0; | |
49 | } | |
50 | EXPORT_SYMBOL(_read_trylock); | |
51 | ||
52 | int __lockfunc _write_trylock(rwlock_t *lock) | |
53 | { | |
54 | preempt_disable(); | |
55 | if (_raw_write_trylock(lock)) | |
56 | return 1; | |
57 | ||
58 | preempt_enable(); | |
59 | return 0; | |
60 | } | |
61 | EXPORT_SYMBOL(_write_trylock); | |
62 | ||
#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)

/*
 * Non-preempt (or non-SMP) variants: disable preemption, then take
 * the raw lock.  Preemption must go off *before* we start spinning,
 * so we cannot be scheduled away while waiting for or holding it.
 */
void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
/*
 * Disable hard interrupts and preemption, then take @lock.
 *
 * Returns the previous irq state so the caller can pass it back to
 * _spin_unlock_irqrestore().  The saved flags are handed to
 * _raw_spin_lock_flags() -- presumably so an architecture may
 * transiently re-enable interrupts while spinning; TODO confirm
 * per-arch.
 */
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_spin_lock_flags(lock, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
/* Disable hard interrupts and preemption, then take @lock. */
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);
/* Disable softirqs and preemption, then take @lock. */
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);
/*
 * Disable hard interrupts and preemption, then take @lock for reading.
 * Returns the previous irq state for _read_unlock_irqrestore().
 */
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);
/* Disable hard interrupts and preemption, then take @lock for reading. */
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);
/* Disable softirqs and preemption, then take @lock for reading. */
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);
/*
 * Disable hard interrupts and preemption, then take @lock for writing.
 * Returns the previous irq state for _write_unlock_irqrestore().
 */
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);
/* Disable hard interrupts and preemption, then take @lock for writing. */
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);
/* Disable softirqs and preemption, then take @lock for writing. */
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);
/* Take @lock with preemption disabled (non-preempt/UP variant). */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock);
/* Take @lock for writing with preemption disabled (non-preempt/UP variant). */
void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}

EXPORT_SYMBOL(_write_lock);
#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 *
 * Pattern: try the lock; on contention, re-enable preemption, raise
 * break_lock to ask the current holder to release early, and spin
 * with cpu_relax() until the lock looks free, then retry.
 */

#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	preempt_disable();						\
	for (;;) {							\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		/* contended: spin preemptably, holder sees break_lock */ \
		preempt_enable();					\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
		preempt_disable();					\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	preempt_disable();						\
	for (;;) {							\
		/* irqs are only disabled across each trylock attempt */ \
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
									\
		preempt_enable();					\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			cpu_relax();					\
		preempt_disable();					\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/*							*/	\
	/* Careful: we must exclude softirqs too, hence the	*/	\
	/* irq-disabling. We use the generic preemption-aware	*/	\
	/* function:						*/	\
	/**/								\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif /* CONFIG_PREEMPT */
/* Release @lock, then re-enable preemption (mirror of _spin_lock()). */
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);
/* Release write @lock, then re-enable preemption (mirror of _write_lock()). */
void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);
/* Release read @lock, then re-enable preemption (mirror of _read_lock()). */
void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);
/*
 * Release @lock, restore the irq state saved by _spin_lock_irqsave(),
 * then re-enable preemption -- the exact reverse of the lock path.
 */
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);
/* Release @lock, re-enable hard interrupts, then preemption. */
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);
/*
 * Release @lock, then re-enable softirqs.  The no-resched flavour of
 * preempt_enable() is used here -- presumably because the following
 * local_bh_enable() performs its own reschedule check, so a second
 * one would be redundant (NOTE: review).
 */
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
/* Release read @lock, restore saved irq state, then re-enable preemption. */
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);
/* Release read @lock, re-enable hard interrupts, then preemption. */
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);
/*
 * Release read @lock, then re-enable softirqs.  Uses the no-resched
 * preempt_enable() -- local_bh_enable() below does its own resched
 * check (NOTE: review).
 */
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);
/* Release write @lock, restore saved irq state, then re-enable preemption. */
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);
/* Release write @lock, re-enable hard interrupts, then preemption. */
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);
/*
 * Release write @lock, then re-enable softirqs.  Uses the no-resched
 * preempt_enable() -- local_bh_enable() below does its own resched
 * check (NOTE: review).
 */
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);
353 | int __lockfunc _spin_trylock_bh(spinlock_t *lock) | |
354 | { | |
355 | local_bh_disable(); | |
356 | preempt_disable(); | |
357 | if (_raw_spin_trylock(lock)) | |
358 | return 1; | |
359 | ||
10f02d1c | 360 | preempt_enable_no_resched(); |
1da177e4 LT |
361 | local_bh_enable(); |
362 | return 0; | |
363 | } | |
364 | EXPORT_SYMBOL(_spin_trylock_bh); | |
365 | ||
/*
 * in_lock_functions - report whether @addr falls inside the
 * __lockfunc text section, i.e. one of the lock-spinning functions
 * in this file.
 */
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];
	unsigned long start = (unsigned long)__lock_text_start;
	unsigned long end = (unsigned long)__lock_text_end;

	return addr >= start && addr < end;
}
EXPORT_SYMBOL(in_lock_functions);