#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <[email protected]>, IBM
 * Copyright (C) 2001 Anton Blanchard <[email protected]>, IBM
 * Copyright (C) 2002 Dave Engebretsen <[email protected]>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif
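
/*
 * Illustrative sketch, not part of the original header: with the PPC64
 * encoding above, a lock held by CPU 3 reads back as 0x80000003, so
 * the holder can be recovered from the lock word itself.  The helper
 * name is hypothetical.
 */
static __inline__ unsigned int __spin_holder_cpu_example(unsigned int slock)
{
	/* low half-word of the token is the paca index (CPU number) */
	return slock & 0xffff;
}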

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
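
/*
 * Illustrative paraphrase, not part of the original header: the asm
 * above behaves like an atomic compare-and-swap of 0 -> LOCK_TOKEN
 * that returns the old value, with the trailing isync acting as an
 * acquire barrier.  The GCC __sync builtin below stands in for the
 * lwarx/stwcx. loop purely for explanation; the helper name is
 * hypothetical.
 */
static __inline__ unsigned long __spin_trylock_sketch(raw_spinlock_t *lock)
{
	/* returns 0 on success, the current holder's token on failure */
	return __sync_val_compare_and_swap(&lock->slock, 0, LOCK_TOKEN);
}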

static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 * (An illustrative sketch of the yield path follows the
 * SHARED_PROCESSOR definitions below.)
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
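
/*
 * Illustrative sketch, not part of the original header, of what the
 * out-of-line __spin_yield() declared above does (the real version
 * lives in arch/powerpc/lib/locks.c; this is simplified and the field
 * names are approximate).  It decodes the holder from the lock word
 * and, if that virtual processor is preempted, confers the rest of
 * our timeslice to it.  iSeries uses HvCall2() rather than the
 * H_CONFER hcall shown here.
 */
#if 0	/* sketch only */
void __spin_yield(raw_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;				/* lock was just freed */
	holder_cpu = lock_value & 0xffff;	/* 0x800000yy encoding */
	yield_count = lppaca[holder_cpu].yield_count;
	if ((yield_count & 1) == 0)
		return;				/* holder is running now */
	rmb();
	if (lock->slock != lock_value)
		return;				/* lock changed hands */
	/* give the rest of our timeslice to the lock holder */
	plpar_hcall_norets(H_CONFER,
			   get_hard_smp_processor_id(holder_cpu),
			   yield_count);
}
#endif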

static void __inline__ __raw_spin_lock(raw_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		/* spin with the caller's interrupt state (possibly
		 * enabled) restored, then disable again before retrying */
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# __raw_spin_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	lock->slock = 0;
}
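
/*
 * Illustrative sketch, not part of the original header: what the
 * CLEAR_IO_SYNC/SYNC_IO pair is for.  On PPC64 SMP the MMIO accessors
 * set paca->io_sync after a device store, so the next unlock upgrades
 * its lwsync to a full mb() and the store cannot leak out of the
 * critical section.  The function and argument names below are
 * hypothetical.
 */
#if 0	/* sketch only */
static void example_ring_doorbell(void __iomem *doorbell,
				  raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);		/* CLEAR_IO_SYNC: forget stale I/O */
	writel(1, doorbell);		/* accessor sets paca->io_sync */
	__raw_spin_unlock(lock);	/* SYNC_IO sees the flag, does mb() */
}
#endif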

#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
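
/*
 * Illustrative sketch, not part of the original header: the mixing
 * described in the NOTE above, written against the generic
 * <linux/spinlock.h> API that wraps these primitives.  All names
 * below are hypothetical.
 */
#if 0	/* sketch only */
static rwlock_t stats_lock = RW_LOCK_UNLOCKED;

/* runs in interrupt context: a plain, non-irqsafe read lock is enough */
static void stats_irq_reader(void)
{
	read_lock(&stats_lock);
	/* ... read the shared statistics ... */
	read_unlock(&stats_lock);
}

/*
 * Writers must be irq-safe: if stats_irq_reader() interrupted a
 * same-CPU writer that held the lock, its read_lock() would spin
 * forever.
 */
static void stats_writer(void)
{
	write_lock_irq(&stats_lock);
	/* ... update the shared statistics ... */
	write_unlock_irq(&stats_lock);
}
#endif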

#define __raw_read_can_lock(rw)		((rw)->lock >= 0)
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

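/*
 * Illustrative note, not part of the original header: lwarx
 * zero-extends the 32-bit lock word, so on PPC64 a write-held lock
 * (the 0x800000yy token) would appear as a large positive 64-bit
 * value.  The extsw emitted by __DO_SIGN_EXTEND restores it to a
 * negative long so the signed tests in the asm below behave.  The
 * helper name is hypothetical.
 */
static __inline__ long __rw_word_as_signed_example(unsigned int word)
{
	return (long)(signed int)word;	/* what "extsw %0,%0" computes */
}
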
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%1\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
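
/*
 * Illustrative paraphrase, not part of the original header: the asm
 * above atomically increments the lock word unless it is write-held
 * (negative), returning old + 1.  The GCC __sync builtin stands in
 * for the lwarx/stwcx. loop purely for explanation; the helper name
 * is hypothetical.
 */
#if 0	/* sketch only */
static __inline__ long __read_trylock_sketch(raw_rwlock_t *rw)
{
	signed int oldval, newval;

	do {
		oldval = rw->lock;
		newval = oldval + 1;
		if (newval <= 0)
			return newval;	/* write-held: fail, like "ble- 2f" */
	} while (!__sync_bool_compare_and_swap(&rw->lock, oldval, newval));
	return newval;			/* > 0: read lock taken */
}
#endif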

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(raw_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static void __inline__ __raw_read_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static void __inline__ __raw_write_lock(raw_rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static int __inline__ __raw_read_trylock(raw_rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}

static int __inline__ __raw_write_trylock(raw_rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	LWSYNC_ON_SMP
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				LWSYNC_ON_SMP: : :"memory");
	rw->lock = 0;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */