#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

#define CLI_STRING	"cli"
#define STI_STRING	"sti"

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions: fairness would have a cost.
 *
 * (The type definitions are in asm/spinlock_types.h.)
 */

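/*
 * Encoding sketch: ->slock is a byte that starts at 1 (unlocked).
 * An acquire atomically decrements it, so 0 means "held", and
 * contending CPUs may drive it negative before backing off to
 * polling.  That is why the "<= 0" test below reads as "locked".
 */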
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	return *(volatile signed char *)(&(x)->slock) <= 0;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile("\n1:\t"
		     LOCK_PREFIX " ; decb %0\n\t"
		     "jns 3f\n"
		     "2:\t"
		     "rep;nop\n\t"
		     "cmpb $0,%0\n\t"
		     "jle 2b\n\t"
		     "jmp 1b\n"
		     "3:\n\t"
		     : "+m" (lock->slock) : : "memory");
}
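/*
 * Roughly equivalent C for the asm above (illustration only; the real
 * code relies on the decrement being a single atomic LOCK'd decb):
 *
 *	for (;;) {
 *		if (--lock->slock >= 0)		// atomic decb
 *			break;			// got it
 *		while (lock->slock <= 0)	// spin read-only
 *			cpu_relax();		// rep;nop
 *	}
 */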

/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock-acquire.  This is a performance feature anyway,
 * so we turn it off:
 *
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decb %0\n\t"
		"jns 5f\n"
		"2:\t"
		"testl $0x200, %1\n\t"
		"jz 4f\n\t"
		STI_STRING "\n"
		"3:\t"
		"rep;nop\n\t"
		"cmpb $0, %0\n\t"
		"jle 3b\n\t"
		CLI_STRING "\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
		"cmpb $0, %0\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: "+m" (lock->slock) : "r" (flags) : "memory");
}
#endif
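/*
 * What the flags variant above does, as a C sketch (illustration only;
 * 0x200 is the IF bit in the EFLAGS value the caller saved):
 *
 *	while (atomic decb of lock->slock goes negative) {
 *		if (flags & 0x200) {		// IRQs were on before
 *			sti();			// let interrupts in
 *			while (lock->slock <= 0)
 *				cpu_relax();
 *			cli();			// off again before retry
 *		} else {
 *			while (lock->slock <= 0)
 *				cpu_relax();
 *		}
 *	}
 */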

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval;
	asm volatile(
		"xchgb %b0,%1"
		:"=q" (oldval), "+m" (lock->slock)
		:"0" (0) : "memory");
	return oldval > 0;
}
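/*
 * The xchgb atomically stores 0 ("held") and returns the old byte, so
 * the lock was acquired iff the old value was positive.  Typical shape
 * of a caller (illustrative, with a hypothetical my_lock):
 *
 *	if (__raw_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		__raw_spin_unlock(&my_lock);
 *	}
 */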

/*
 * __raw_spin_unlock is based on writing $1 to the low byte.
 * This method works despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile("movb $1,%0" : "+m" (lock->slock) :: "memory");
}

#else

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	char oldval = 1;

	asm volatile("xchgb %b0, %1"
		     : "=q" (oldval), "+m" (lock->slock)
		     : "0" (oldval) : "memory");
}

#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious.  Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 *
 * The helpers are in arch/i386/kernel/semaphore.c.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *x)
{
	return (int)(x)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *x)
{
	return (x)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::"a" (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::"a" (rw) : "memory");
}
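/*
 * The out-of-line slow paths live in arch/i386/kernel/semaphore.c (see
 * the comment above).  Roughly, as a sketch (not the verbatim helpers):
 *
 *	__read_lock_failed:	undo the subl, spin until the count
 *				looks positive, then retry the subl;
 *	__write_lock_failed:	add RW_LOCK_BIAS back, spin until the
 *				count reads RW_LOCK_BIAS, then retry.
 *
 * Both take the rwlock pointer in %eax, which is why the "a" constraint
 * is used above.
 */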

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
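/*
 * Worked example of the trylock arithmetic (RW_LOCK_BIAS = 0x01000000):
 *
 *	read_trylock, free lock:	0x01000000 - 1 = 0x00ffffff >= 0, ok
 *	read_trylock vs. held writer:	0 - 1 = -1 < 0, undo via atomic_inc
 *	write_trylock, free lock:	0x01000000 - 0x01000000 == 0, ok
 *	write_trylock vs. one reader:	0x00ffffff - 0x01000000 != 0, undo
 */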

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
		     : "+m" (rw->lock) : : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */