#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H

#include <asm/system.h>

#define RW_LOCK_BIAS 0x01000000
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
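
/*
 * Lock word encoding (inferred from the code below): 1 means the lock
 * is free, and the CRIS lock helpers are expected to drive it to zero
 * or below while the lock is held, which is why spin_is_locked()
 * tests the low byte for <= 0.
 */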

extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);

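/*
 * Releasing the lock is a plain store of 1; the "memory" clobber acts
 * as a compiler barrier so critical-section accesses cannot be moved
 * past the releasing store.
 */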
static inline void _raw_spin_unlock(spinlock_t *lock)
{
        __asm__ volatile ("move.d %1,%0"
                          : "=m" (lock->lock)
                          : "r" (1)
                          : "memory");
}

static inline int _raw_spin_trylock(spinlock_t *lock)
{
        return cris_spin_trylock((void *)&lock->lock);
}

static inline void _raw_spin_lock(spinlock_t *lock)
{
        cris_spin_lock((void *)&lock->lock);
}

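/*
 * Note: the flags argument is unused here; this port simply spins
 * without re-enabling interrupts while it waits.
 */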
static inline void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
        _raw_spin_lock(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
        spinlock_t lock;
        volatile int counter;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { {1}, 0 }

#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

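/*
 * A sketch of the "mix" described above, using hypothetical my_lock /
 * my_list names: process-context writers take the irq-safe variant,
 * while readers - including ones in interrupt handlers - can use the
 * plain form:
 *
 *	write_lock_irqsave(&my_lock, flags);
 *	list_add(&item->node, &my_list);
 *	write_unlock_irqrestore(&my_lock, flags);
 *
 *	read_lock(&my_lock);
 *	... walk my_list ...
 *	read_unlock(&my_lock);
 */
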
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define read_can_lock(x) ((int)(x)->counter >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define write_can_lock(x) ((x)->counter == 0)

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

/* read_lock, read_unlock are pretty straightforward. Of course it somewhat
 * sucks that we end up saving/restoring flags twice for read_lock_irqsave
 * and so on. */

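/*
 * Interrupts must stay disabled while rw->lock is held: a reader in an
 * interrupt handler on this CPU would otherwise spin forever on the
 * same spinlock.
 */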
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        _raw_spin_lock(&rw->lock);

        rw->counter++;

        _raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        _raw_spin_lock(&rw->lock);

        rw->counter--;

        _raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* write_lock is less trivial. We optimistically grab the lock and check
 * if we surprised any readers. If so we release the lock and wait till
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers, someone messed up and we'd deadlock
 * sooner or later anyway. prumpf */

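/*
 * Note that we spin on rw->counter with rw->lock released: readers must
 * take rw->lock to decrement the counter, so waiting while holding it
 * would never terminate.
 */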
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
        _raw_spin_lock(&rw->lock);

        if (rw->counter != 0) {
                /* this basically never happens */
                _raw_spin_unlock(&rw->lock);

                while (rw->counter != 0)
                        ;

                goto retry;
        }

        /* got it. now leave without unlocking */
        rw->counter = -1; /* remember we are locked */
}

/* write_unlock is absolutely trivial - we don't have to wait for anything */

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
        rw->counter = 0;
        _raw_spin_unlock(&rw->lock);
}

static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
        _raw_spin_lock(&rw->lock);
        if (rw->counter != 0) {
                /* this basically never happens */
                _raw_spin_unlock(&rw->lock);

                return 0;
        }

        /* got it. now leave without unlocking */
        rw->counter = -1; /* remember we are locked */
        return 1;
}

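/*
 * Counter encoding, for reference: 0 means unlocked, a positive value
 * is the number of active readers, and -1 marks a writer holding the
 * lock.
 */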
static __inline__ int is_read_locked(rwlock_t *rw)
{
        return rw->counter > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
        return rw->counter < 0;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_ARCH_SPINLOCK_H */