/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller ([email protected])
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

#ifndef CONFIG_DEBUG_SPINLOCK
typedef struct {
	volatile unsigned char lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) {0,}

#define spin_lock_init(lp)	do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)	((lp)->lock != 0)

#define spin_unlock_wait(lp)	\
do {	membar("#LoadLoad");	\
} while((lp)->lock)

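/* The fast path is a single ldstub, which atomically reads the lock
 * byte and writes 0xff into it; a zero result means we now own the
 * lock.  On contention we fall into an out-of-line loop (.subsection 2)
 * that polls the byte with plain loads and retries the ldstub only
 * once the byte reads zero.  The membar in the delay slot orders the
 * lock acquisition before the critical section under RMO.
 */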
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	brnz,pn		%0, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

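/* A single ldstub attempt with no spinning: returns 1 if the old
 * lock byte was zero (we now own the lock), 0 if it was held.
 */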
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

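/* Release: order all of the critical section's loads and stores
 * before the plain byte store of zero that drops the lock.
 */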
static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

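/* Like _raw_spin_lock(), but while waiting we drop the processor
 * interrupt level (%pil) back to the caller's saved value in
 * 'flags' so that interrupts may be serviced during the spin,
 * then raise it again (in the branch delay slot) before retrying
 * the ldstub.
 */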
static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	brnz,pt		%0, 3b\n"
"	 membar		#LoadLoad\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned char lock;
	unsigned int owner_pc, owner_cpu;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(lp)	do { *(lp) = SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(__lock)	((__lock)->lock != 0)
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while((__lock)->lock)

extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _do_spin_trylock (spinlock_t *lock);

#define _raw_spin_trylock(lp)	_do_spin_trylock(lp)
#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#endif /* CONFIG_DEBUG_SPINLOCK */

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

#ifndef CONFIG_DEBUG_SPINLOCK

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) {0,}
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

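/* The lock word holds the reader count in its low bits and the
 * writer bit in bit 31, so a negative (signed) value means a
 * writer owns the lock.  A reader cas's in old+1 whenever the
 * word is non-negative, polling out of line otherwise.
 */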
static inline void __read_lock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	brlz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

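/* Drop one reader reference: the leading membar keeps the critical
 * section's accesses ordered before the cas that decrements the
 * count.
 */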
static inline void __read_unlock(rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"1:	lduw		[%2], %0\n"
"	sub		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

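/* A writer may cas the 0x80000000 bit in only when the whole word
 * is zero, i.e. when there are no readers and no other writer;
 * otherwise it polls out of line until the word clears.
 */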
static inline void __write_lock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	brnz,pt		%0, 2b\n"
"	 membar		#LoadLoad\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

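/* Release: barrier, then clear the whole word (the writer bit). */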
static inline void __write_unlock(rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

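/* Gives up (returns 0) as soon as the lock word is seen non-zero;
 * a failed cas merely re-examines the word before deciding.
 */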
static inline int __write_trylock(rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 membar		#StoreLoad | #StoreStore\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define _raw_read_lock(p)	__read_lock(p)
#define _raw_read_unlock(p)	__read_unlock(p)
#define _raw_write_lock(p)	__write_lock(p)
#define _raw_write_unlock(p)	__write_unlock(p)
#define _raw_write_trylock(p)	__write_trylock(p)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

typedef struct {
	volatile unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[NR_CPUS];
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
#define rwlock_init(lp)		do { *(lp) = RW_LOCK_UNLOCKED; } while(0)

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
extern int _do_write_trylock(rwlock_t *rw, char *str);

#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_trylock(lock) \
({	unsigned long flags; \
	int val; \
	local_irq_save(flags); \
	val = _do_write_trylock(lock, "write_trylock"); \
	local_irq_restore(flags); \
	val; \
})

#endif /* CONFIG_DEBUG_SPINLOCK */

#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
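/* Readers are blocked only by a writer (bit 31); a writer needs
 * the whole word, writer bit and reader count, to be zero.
 */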
#define read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
#define write_can_lock(rw)	(!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */