#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value. If it is zero, we may have
 * won the lock, so we try exclusively storing it. A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

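/*
 * Illustrative sketch (not compiled as part of this header): roughly the
 * same acquire/release protocol expressed with GCC __atomic builtins in
 * portable C, ignoring the WFE/SEV power-saving hints used in the real
 * assembly below.  The names example_lock_t, example_spin_lock and
 * example_spin_unlock are hypothetical and exist only to make the
 * algorithm readable.
 *
 *      typedef struct { unsigned int lock; } example_lock_t;
 *
 *      static inline void example_spin_lock(example_lock_t *l)
 *      {
 *              unsigned int expected;
 *
 *              do {
 *                      expected = 0;   // unlocked value
 *              } while (!__atomic_compare_exchange_n(&l->lock, &expected, 1,
 *                                                    0, __ATOMIC_RELAXED,
 *                                                    __ATOMIC_RELAXED));
 *              __atomic_thread_fence(__ATOMIC_SEQ_CST);  // smp_mb() after taking the lock
 *      }
 *
 *      static inline void example_spin_unlock(example_lock_t *l)
 *      {
 *              __atomic_thread_fence(__ATOMIC_SEQ_CST);  // smp_mb() before releasing it
 *              __atomic_store_n(&l->lock, 0, __ATOMIC_RELAXED);
 *      }
 */
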
#define __raw_spin_is_locked(x)         ((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" wfene\n"
#endif
" strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n"
" bne 1b"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        smp_mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
" ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        if (tmp == 0) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        smp_mb();

        __asm__ __volatile__(
" str %1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */
" sev"
#endif
        :
        : "r" (&lock->lock), "r" (0)
        : "cc");
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31. When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
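
/*
 * Illustrative sketch (not compiled as part of this header): the bit-31
 * write-lock encoding described above, using GCC __atomic builtins and a
 * hypothetical example_rwlock_t.  A writer atomically moves the word from
 * 0 to 0x80000000; any non-zero value (readers or another writer) makes
 * the attempt fail and retry.
 *
 *      typedef struct { unsigned int lock; } example_rwlock_t;
 *
 *      static inline void example_write_lock(example_rwlock_t *rw)
 *      {
 *              unsigned int expected;
 *
 *              do {
 *                      expected = 0;   // free: no readers, no writer
 *              } while (!__atomic_compare_exchange_n(&rw->lock, &expected,
 *                                                    0x80000000u, 0,
 *                                                    __ATOMIC_RELAXED,
 *                                                    __ATOMIC_RELAXED));
 *              __atomic_thread_fence(__ATOMIC_SEQ_CST);  // smp_mb() after taking the lock
 *      }
 *
 *      static inline void example_write_unlock(example_rwlock_t *rw)
 *      {
 *              __atomic_thread_fence(__ATOMIC_SEQ_CST);  // smp_mb() before releasing it
 *              __atomic_store_n(&rw->lock, 0, __ATOMIC_RELAXED);  // exclusive holder, plain 0 is enough
 *      }
 */
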
#define rwlock_is_locked(x)     (*((volatile unsigned int *)(x)) != 0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
" wfene\n"
#endif
" strexeq %0, %2, [%1]\n"
" teq %0, #0\n"
" bne 1b"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        if (tmp == 0) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        smp_mb();

        __asm__ __volatile__(
" str %1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
" mcr p15, 0, %1, c7, c10, 4\n" /* DSB */
" sev\n"
#endif
        :
        : "r" (&rw->lock), "r" (0)
        : "cc");
}

/* write_can_lock - would write_trylock() succeed? */
#define __raw_write_can_lock(x)         ((x)->lock == 0x80000000)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy. We may have multiple read locks
 * currently active. However, we know we won't have any write
 * locks.
 */
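
/*
 * Illustrative sketch (not compiled as part of this header): the reader
 * algorithm above in portable C with GCC __atomic builtins.  The names
 * example_read_lock and example_read_unlock are hypothetical, and they
 * take a plain unsigned int * so the sketch stays self-contained; the
 * real code below folds the "still positive" test and the store into a
 * single conditional exclusive store (strexpl).
 *
 *      static inline void example_read_lock(unsigned int *lock)
 *      {
 *              unsigned int old;
 *
 *              for (;;) {
 *                      old = __atomic_load_n(lock, __ATOMIC_RELAXED);
 *                      if (old & 0x80000000u)  // bit 31 set: a writer holds the lock
 *                              continue;       // spin until it is released
 *                      if (__atomic_compare_exchange_n(lock, &old, old + 1, 0,
 *                                                      __ATOMIC_RELAXED,
 *                                                      __ATOMIC_RELAXED))
 *                              break;          // reader count bumped, lock taken
 *              }
 *              __atomic_thread_fence(__ATOMIC_SEQ_CST);  // smp_mb() after taking the lock
 *      }
 *
 *      static inline void example_read_unlock(unsigned int *lock)
 *      {
 *              __atomic_thread_fence(__ATOMIC_SEQ_CST);  // smp_mb() before releasing
 *              __atomic_sub_fetch(lock, 1, __ATOMIC_RELAXED);  // drop one reader
 *      }
 */
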
fb1c8f93 | 160 | static inline void __raw_read_lock(raw_rwlock_t *rw) |
1da177e4 LT |
161 | { |
162 | unsigned long tmp, tmp2; | |
163 | ||
164 | __asm__ __volatile__( | |
165 | "1: ldrex %0, [%2]\n" | |
166 | " adds %0, %0, #1\n" | |
167 | " strexpl %1, %0, [%2]\n" | |
00b4c907 RK |
168 | #ifdef CONFIG_CPU_32v6K |
169 | " wfemi\n" | |
170 | #endif | |
1da177e4 LT |
171 | " rsbpls %0, %1, #0\n" |
172 | " bmi 1b" | |
173 | : "=&r" (tmp), "=&r" (tmp2) | |
174 | : "r" (&rw->lock) | |
6d9b37a3 RK |
175 | : "cc"); |
176 | ||
177 | smp_mb(); | |
1da177e4 LT |
178 | } |
179 | ||
7e86df27 | 180 | static inline void __raw_read_unlock(raw_rwlock_t *rw) |
1da177e4 | 181 | { |
4e8fd22b RK |
182 | unsigned long tmp, tmp2; |
183 | ||
6d9b37a3 RK |
184 | smp_mb(); |
185 | ||
1da177e4 LT |
186 | __asm__ __volatile__( |
187 | "1: ldrex %0, [%2]\n" | |
188 | " sub %0, %0, #1\n" | |
189 | " strex %1, %0, [%2]\n" | |
190 | " teq %1, #0\n" | |
191 | " bne 1b" | |
00b4c907 RK |
192 | #ifdef CONFIG_CPU_32v6K |
193 | "\n cmp %0, #0\n" | |
194 | " mcreq p15, 0, %0, c7, c10, 4\n" | |
195 | " seveq" | |
196 | #endif | |
1da177e4 LT |
197 | : "=&r" (tmp), "=&r" (tmp2) |
198 | : "r" (&rw->lock) | |
6d9b37a3 | 199 | : "cc"); |
1da177e4 LT |
200 | } |
201 | ||
8e34703b RK |
202 | static inline int __raw_read_trylock(raw_rwlock_t *rw) |
203 | { | |
e89bc811 | 204 | unsigned long tmp, tmp2 = 1; |
8e34703b RK |
205 | |
206 | __asm__ __volatile__( | |
207 | "1: ldrex %0, [%2]\n" | |
208 | " adds %0, %0, #1\n" | |
209 | " strexpl %1, %0, [%2]\n" | |
210 | : "=&r" (tmp), "+r" (tmp2) | |
211 | : "r" (&rw->lock) | |
212 | : "cc"); | |
213 | ||
214 | smp_mb(); | |
215 | return tmp2 == 0; | |
216 | } | |
1da177e4 | 217 | |
c2a4c406 CM |
218 | /* read_can_lock - would read_trylock() succeed? */ |
219 | #define __raw_read_can_lock(x) ((x)->lock < 0x80000000) | |
220 | ||
ef6edc97 MS |
221 | #define _raw_spin_relax(lock) cpu_relax() |
222 | #define _raw_read_relax(lock) cpu_relax() | |
223 | #define _raw_write_relax(lock) cpu_relax() | |
224 | ||
1da177e4 | 225 | #endif /* __ASM_SPINLOCK_H */ |