#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

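/*
 * Illustrative note (the primitives themselves are defined elsewhere in
 * the PA-RISC headers): __ldcw() wraps the "load and clear word"
 * instruction, the only atomic read-modify-write operation PA-RISC
 * offers.  Conceptually it performs, as one atomic step:
 *
 *	old = *a;	// lock word is non-zero when free
 *	*a = 0;		// ...and zero when held
 *	return old;
 *
 * Because ldcw can only clear, the lock sense is inverted: a non-zero
 * word means unlocked, which is why __raw_spin_is_locked() tests for 0.
 * __ldcw_align() returns the properly aligned lock word, since ldcw
 * requires a 16-byte-aligned operand on older PA-RISC implementations.
 */
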
#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))

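/*
 * Acquire path.  The outer loop attempts the atomic ldcw; on failure we
 * drop into an inner loop that spins on a plain read until the lock word
 * goes non-zero again.  Spinning on the ordinary load keeps the lock's
 * cache line shared among the waiters rather than hammering it with
 * atomic operations.  If the caller's saved flags show interrupts were
 * enabled (PSW_SM_I set), interrupts are re-enabled while busy-waiting
 * so they are not starved during a long spin.
 */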
static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				cpu_relax();
				local_irq_disable();
			} else
				cpu_relax();
	mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}
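
/*
 * On the barriers: the mb() calls bracket every access to the lock word.
 * The barrier after a successful ldcw keeps critical-section loads and
 * stores from being reordered before the lock is actually held, and the
 * barrier before the release store keeps them from leaking past the
 * point where the lock is freed.
 */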

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */

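/*
 * Encoding of rw->counter (a summary of the code below):
 *
 *	counter > 0	that many readers hold the lock
 *	counter == 0	the lock is free
 *	counter == -1	a writer holds the lock
 *
 * rw->lock is an ordinary spinlock that serialises updates to counter.
 */
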
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

/* read_lock and read_unlock are pretty straightforward.  It is somewhat
 * unfortunate that we end up saving/restoring flags twice for
 * read_lock_irqsave and friends. */

static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	rw->counter++;

	__raw_spin_unlock(&rw->lock);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	rw->counter--;

	__raw_spin_unlock(&rw->lock);
}

/* write_lock is less trivial.  We optimistically grab the lock and check
 * whether we surprised any readers.  If so, we release the lock and wait
 * until they are all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore variants here.
 * If we're called with interrupts enabled and there are readers (or other
 * writers) in interrupt handlers, someone made a mistake and we'd deadlock
 * sooner or later anyway. prumpf */

static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
retry:
	__raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}
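
/*
 * Note that while a writer waits for the counter to drain it does not
 * hold rw->lock, so new readers can keep arriving and re-incrementing
 * the count.  This implementation therefore favours readers, and a
 * writer can in principle starve under a steady stream of readers.
 */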

/* write_unlock is absolutely trivial - we don't have to wait for anything */

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}

static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		return 0;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}
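
/*
 * Strictly speaking this "trylock" can still spin: __raw_spin_lock() may
 * busy-wait on rw->lock before the counter is even examined.  It only
 * fails fast with respect to the reader count, not the internal spinlock.
 */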

/*
 * read_can_lock - would read_trylock() succeed?
 * @rw: the rwlock in question.
 */
static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
{
	return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @rw: the rwlock in question.
 */
static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
{
	return !rw->counter;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */