/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller ([email protected])
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */

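/* The spinlock state is a single byte: zero means unlocked, and the
 * ldstub used below atomically reads the byte and writes 0xff, so any
 * non-zero value means the lock is held.
 */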
#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)

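/* Fast path is a single ldstub; if the byte was already non-zero we
 * branch into the out-of-line spinner (.subsection 2), watch the byte
 * with plain ldub loads until it clears, then retry the ldstub.
 */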
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

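/* One ldstub attempt, no spinning: returns 1 if we grabbed the lock,
 * 0 if it was already held.
 */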
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

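/* Release: order every load and store issued inside the critical
 * section before the byte store of zero that drops the lock.
 */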
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

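/* As __raw_spin_lock(), but while we spin we write the caller's saved
 * %pil (passed in 'flags') back so interrupts can be serviced, and we
 * restore the raised %pil again before retrying the ldstub.
 */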
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

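/* The rwlock word is a 32-bit counter: readers cas-increment it, and a
 * writer cas's in bit 31 (0x80000000) only when the word is zero, so a
 * negative value always means a writer is involved.
 */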
static inline void __read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

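/* Try to take a reader reference: keeps retrying the cas while other
 * readers race with us, but bails out and returns 0 as soon as the
 * count is negative (a writer); returns 1 on success.
 */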
static inline int __read_trylock(raw_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

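/* Drop a reader reference: cas-decrement the counter, retrying until
 * our update goes through.
 */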
static inline void __read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"1:	lduw		[%2], %0\n"
"	sub		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

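/* Writer acquire: wait until the word is completely zero (no readers,
 * no writer), then cas in the 0x80000000 writer bit; contention sends
 * us to the out-of-line spinner just like the spinlock code.
 */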
static inline void __write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

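/* Writer release: order the critical section's accesses, then clear
 * the whole word.
 */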
static inline void __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

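/* One shot at the writer bit: returns 0 if any reader or writer is
 * already present, 1 if our cas of the 0x80000000 bit succeeded.
 */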
static inline int __write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */