#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return *(volatile signed int *)(&(lock)->slock) <= 0;
}

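/*
 * slock is 1 when the lock is free and 0 or negative when it is held.
 * The fast path below atomically decrements it; if the result is still
 * non-negative (jns), the caller owns the lock.  Otherwise we spin with
 * "rep;nop" (the PAUSE hint) until the value turns positive, then retry
 * the atomic decrement.
 */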
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decl %0\n\t"
		"jns 2f\n"
		"3:\n"
		"rep;nop\n\t"
		"cmpl $0,%0\n\t"
		"jle 3b\n\t"
		"jmp 1b\n"
		"2:\t" : "=m" (lock->slock) : : "memory");
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

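/*
 * Trylock: atomically exchange 0 into slock.  xchg with a memory
 * operand is always locked on x86, so no LOCK prefix is needed.  The
 * old value comes back in oldval; it was 1 (free) exactly when we have
 * just taken the lock.
 */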
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int oldval;

	asm volatile(
		"xchgl %0,%1"
		:"=q" (oldval), "=m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}

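/*
 * Unlock is a plain store of 1.  On x86, stores are not reordered with
 * earlier loads or stores, so this already has release semantics and
 * needs no atomic operation.
 */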
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
}

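/*
 * Wait until the lock is released, without attempting to take it.
 */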
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

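/*
 * Concretely (RW_LOCK_BIAS comes from asm/rwlock.h and is
 * conventionally 0x01000000): each reader subtracts 1 and a writer
 * subtracts the whole bias, so:
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	0 < lock < RW_LOCK_BIAS		held by one or more readers
 *	lock == 0			held by one writer
 *	lock < 0			contended (sign bit set)
 */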
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

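/*
 * Read lock: atomically subtract 1; if the count stays non-negative
 * (jns) the read lock is held.  On contention, the out-of-line helper
 * __read_lock_failed backs the decrement out, spins, and retries; the
 * "D" constraint passes the lock pointer in %rdi, where that helper
 * expects it.
 */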
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n"
		     "1:\n"
		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}

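/*
 * Write lock: atomically subtract the whole bias; the result is zero
 * (jz) only if no reader or writer held the lock.  Otherwise
 * __write_lock_failed restores the bias and spins until the lock is
 * completely free again.
 */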
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "\tcall __write_lock_failed\n\t"
		     "1:\n"
		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
}

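/*
 * The trylocks are written in C on top of atomic_t.  Note that the
 * dec-then-read below is not one atomic operation: other CPUs can
 * briefly observe a transient negative count, just as with a failed
 * locked subl above, and the failure path backs the change out.
 */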
static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

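/*
 * Unlock gives back exactly what lock took: readers add 1, writers add
 * the whole bias.  Unlike the spinlock unlock above, these must be
 * LOCK-prefixed read-modify-write ops, because other CPUs may be
 * updating the shared counter at the same time.
 */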
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
				: "=m" (rw->lock) : : "memory");
}

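/*
 * Usage sketch (illustrative only, not part of this header): the
 * __raw_* primitives are not called directly.  The generic wrappers in
 * linux/spinlock.h map down to them, roughly:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);    // -> __raw_spin_lock(&my_lock.raw_lock)
 *	// ... critical section ...
 *	spin_unlock(&my_lock);  // -> __raw_spin_unlock(&my_lock.raw_lock)
 */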
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */