/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/* Locking primitives.  Most of this code should be redundant -
   system emulation doesn't need/use locking, NPTL userspace uses
   pthread mutexes, and non-NPTL userspace isn't threadsafe anyway.
   In either case a spinlock is probably the wrong kind of lock.
   Spinlocks are only good if you know another CPU has the lock and is
   likely to release it soon.  In environments where you have more threads
   than physical CPUs (the extreme case being a single CPU host) a spinlock
   simply wastes CPU until the OS decides to preempt it.  */
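
/* Illustrative only (not part of the original header): a minimal sketch of
   how the spin_lock()/spin_unlock() API defined below is meant to be used,
   for a hypothetical shared counter.  It works with either branch of the
   USE_NPTL conditional, since both provide the same names:

       static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;
       static int counter;

       static void counter_inc(void)
       {
           spin_lock(&counter_lock);    // busy-waits until acquired
           counter++;                   // critical section
           spin_unlock(&counter_lock);  // releases the lock
       }
*/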
#if defined(USE_NPTL)

#include <pthread.h>
#define spin_lock pthread_mutex_lock
#define spin_unlock pthread_mutex_unlock
#define spinlock_t pthread_mutex_t
#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER

#else

#if defined(__hppa__)

typedef int spinlock_t[4];

#define SPIN_LOCK_UNLOCKED { 1, 1, 1, 1 }

static inline void resetlock (spinlock_t *p)
{
    (*p)[0] = (*p)[1] = (*p)[2] = (*p)[3] = 1;
}

#else

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

static inline void resetlock (spinlock_t *p)
{
    *p = SPIN_LOCK_UNLOCKED;
}

#endif

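/* Each target below supplies testandset(): it atomically marks the lock
   as held and returns its previous state - zero iff the caller acquired
   the lock.  (On hppa the sense of the stored word is inverted, so that
   implementation negates the result to preserve this contract.) */
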
#if defined(__powerpc__)
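/* Load-reserve/store-conditional (lwarx/stwcx.) retry loop; ret ends up
   holding the previous lock value, so zero means the lock was acquired. */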
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "      lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne $+12\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- $-16\n"
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#elif defined(__i386__)
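/* "lock; cmpxchgl": if *p equals 0 (the value preloaded into eax),
   atomically store 1; eax receives the old value of *p, so a zero
   return means the lock was acquired. */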
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__x86_64__)
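/* Same compare-and-exchange sequence as the i386 version; the lock word
   is still a 32-bit int, so cmpxchgl is used here as well. */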
static inline int testandset (int *p)
{
    long int readval = 0;

    __asm__ __volatile__ ("lock; cmpxchgl %2, %0"
                          : "+m" (*p), "+a" (readval)
                          : "r" (1)
                          : "cc");
    return readval;
}
#elif defined(__s390__)
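/* "cs" is the s390 compare-and-swap: store 1 into *p if *p still equals
   ret (initialized from *p), retrying until the swap succeeds. */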
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
                          "   jl 0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#elif defined(__alpha__)
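/* Load-locked/store-conditional (ldl_l/stl_c) loop; the out-of-line
   ".subsection 2" branch retries if the conditional store fails. */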
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          "   ldl_l %0,%1\n"
                          "   stl_c %2,%1\n"
                          "   beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#elif defined(__sparc__)
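/* "ldstub" atomically loads a byte and stores 0xff into it; a zero
   result means the lock was free and is now held by the caller. */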
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#elif defined(__arm__)
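/* "swp" atomically exchanges 1 with the lock word and returns the old
   value.  (swp was deprecated from ARMv6 onwards in favour of
   ldrex/strex, but is what this era of code targeted.) */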
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#elif defined(__mc68000)
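/* "tas" atomically sets the high bit of the addressed byte and updates
   the condition codes from its previous value; "sne" turns that into a
   zero/nonzero result. */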
static inline int testandset (int *p)
{
    char ret;
    /* The memory operand must be the lock word itself (*p), not the
       pointer variable, and tas writes to it, so a read/write
       constraint is used. */
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret), "+m" (*p)
                         :
                         : "cc","memory");
    return ret;
}
#elif defined(__hppa__)

/* Because malloc only guarantees 8-byte alignment for malloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */
#define __PA_LDCW_ALIGNMENT 16
static inline void *ldcw_align (void *p) {
    unsigned long a = (unsigned long)p;
    a = (a + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1);
    return (void *)a;
}
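
/* Worked example of the round-up: with __PA_LDCW_ALIGNMENT == 16 and a
   hypothetical p == 0x1008, (0x1008 + 15) & ~15 == 0x1017 & ...fff0
   == 0x1010 - the first 16-byte aligned address inside the four-int
   spinlock_t array, which spans 0x1008..0x1017. */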

static inline int testandset (spinlock_t *p)
{
    unsigned int ret;
    p = ldcw_align(p);
    __asm__ __volatile__("ldcw 0(%1),%0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory" );
    return !ret;
}

#elif defined(__ia64)
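/* ia64 gets its atomic test-and-set from a compiler intrinsic; no
   hand-written assembly is needed here. */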

#include <ia64intrin.h>

static inline int testandset (int *p)
{
    return __sync_lock_test_and_set (p, 1);
}
#elif defined(__mips__)
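/* Load-linked/store-conditional (ll/sc) loop, using the assembler
   temporary $1 ($at, enabled by ".set noat") to hold the value 1;
   ret receives the previous lock value. */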
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ (
        "   .set push       \n"
        "   .set noat       \n"
        "   .set mips2      \n"
        "1: li  $1, 1       \n"
        "   ll  %0, %1      \n"
        "   sc  $1, %1      \n"
        "   beqz $1, 1b     \n"
        "   .set pop        "
        : "=r" (ret), "+R" (*p)
        :
        : "memory");

    return ret;
}
#else
#error unimplemented CPU support
#endif

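/* In user emulation (CONFIG_USER_ONLY) the lock is a real busy-wait
   spinlock built on testandset(); in system emulation, which doesn't
   need/use locking (see the comment at the top of this file), the
   operations compile to no-ops. */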
#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    resetlock(lock);
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

#endif