/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

/* Locking primitives.  Most of this code should be redundant -
   system emulation doesn't need/use locking, NPTL userspace uses
   pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
   In either case a spinlock is probably the wrong kind of lock.
   Spinlocks are only good if you know another CPU has the lock and is
   likely to release it soon.  In environments where you have more threads
   than physical CPUs (the extreme case being a single CPU host) a spinlock
   simply wastes CPU until the OS decides to preempt it.  */
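
/* Illustrative only: callers pair these primitives around a critical
   section.  The lock and counter names in this sketch are hypothetical:

       static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
       static int counter;

       spin_lock(&counter_lock);
       counter++;
       spin_unlock(&counter_lock);
*/
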
#if defined(CONFIG_USE_NPTL)

#include <pthread.h>
#define spin_lock pthread_mutex_lock
#define spin_unlock pthread_mutex_unlock
#define spinlock_t pthread_mutex_t
#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
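
/* With NPTL the "spin" lock is really a pthread mutex, so spin_lock() blocks
   the calling thread on contention instead of busy-waiting.  */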

#else

#if defined(__hppa__)

typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static inline void resetlock (spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}
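
/* All four words are reset because which of them serves as the actual lock
   word depends on the run-time address; see the __hppa__ testandset() and
   ldcw_align() below.  */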

#else

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

static inline void resetlock (spinlock_t *p)
{
    *p = SPIN_LOCK_UNLOCKED;
}

#endif

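/* Each testandset() below atomically marks the lock as taken and returns
   zero if the caller acquired it, or non-zero if it was already held.
   resetlock() is the matching release operation.  */
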
#if defined(_ARCH_PPC)
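/* PowerPC: lwarx/stwcx. load-reserved/store-conditional loop.  The xor. sets
   cr0 from the old lock value; if the lock was already held the store is
   skipped and that non-zero value is returned.  */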
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "      lwarx %0,0,%1\n"
        "      xor. %0,%3,%0\n"
        "      bne $+12\n"
        "      stwcx. %2,0,%1\n"
        "      bne- $-16\n"
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
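/* x86: lock cmpxchgl compares *p with 0 (held in EAX via readval) and stores
   1 only if the lock was free; the previous value of *p ends up in readval.  */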
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
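/* Same sequence as the i386 version: spinlock_t is an int, so a 32-bit
   cmpxchgl is sufficient on x86_64 as well.  */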
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
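/* SPARC: ldstub atomically loads the byte at [%1] and stores 0xff there; a
   zero result means the lock was free and the caller now owns it.  */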
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=d" (ret)
                         : "m" (*p)
                         : "cc","memory");
    return ret;
}
#elif defined(__hppa__)

/* Because malloc only guarantees 8-byte alignment for malloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use an array of four ints for the atomic lock type and dynamically
   select the 16-byte aligned int from the array for the semaphore.  */
#define __PA_LDCW_ALIGNMENT 16
static inline void *ldcw_align (void *p) {
    unsigned long a = (unsigned long)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}
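
/* For example, with __PA_LDCW_ALIGNMENT of 16, an address of 0x1004 rounds up
   to (0x1004 + 15) & ~15 == 0x1010, which still lies within the four-word
   spinlock_t.  ldcw then atomically loads that word and clears it to zero, so
   a non-zero result (the unlocked value 1) means the lock was acquired, hence
   the inverted return value below.  */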

static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}

#elif defined(__ia64)

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
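/* MIPS: ll/sc load-linked/store-conditional loop; the old lock value read by
   ll is returned, and the sequence retries until the sc of 1 succeeds.  */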
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "       .set push               \n"
        "       .set noat               \n"
        "       .set mips2              \n"
        "1:     li      $1, 1           \n"
        "       ll      %0, %1          \n"
        "       sc      $1, %1          \n"
        "       beqz    $1, 1b          \n"
        "       .set pop                "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

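/* Only user-mode emulation ever takes these locks for real; in system
   emulation they compile to no-ops, since (as noted at the top of this file)
   system emulation does not need locking.  */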
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

#endif