/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller ([email protected])
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

#include <asm/psr.h>

#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/* Grab the lock byte with ldstub (atomic byte test-and-set); if it was
 * already held, spin reading the byte in an out-of-line loop
 * (.subsection 2 keeps the slow path off the fast path) until it
 * clears, then retry the ldstub.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub [%0], %%g2\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"
	"bne,a 2f\n\t"
	" ldub [%0], %%g2\n\t"
	".subsection 2\n"
	"2:\n\t"
	"orcc %%g2, 0x0, %%g0\n\t"
	"bne,a 2b\n\t"
	" ldub [%0], %%g2\n\t"
	"b,a 1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
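
/*
 * Editorial illustration (not part of the original header): a C-level
 * sketch of what the ldstub-based fast path above does.  The helper
 * named hypothetical_ldstub() below does not exist; it stands in for
 * the ldstub instruction, which atomically loads a byte and sets it to
 * 0xff, so the lock is taken when the old value was zero.
 */
#if 0	/* sketch only, never compiled */
static inline void sketch_spin_lock(volatile unsigned char *byte)
{
	while (hypothetical_ldstub(byte) != 0)	/* old value non-zero: held */
		while (*byte != 0)		/* spin until it looks free */
			;			/* then retry the test-and-set */
}

static inline void sketch_spin_unlock(volatile unsigned char *byte)
{
	*byte = 0;				/* a plain byte store releases it */
}
#endif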

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  raw_rwlock_t
 *	------------------------------------
 *	 31                       8 7      0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter. If a writer successfully acquires the wlock
 * but the counter is non-zero, he has to release the lock and wait
 * until both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
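
/*
 * Editorial illustration (not part of the original header): how the
 * layout above maps onto the lock word.  raw_rwlock_t exposes a ->lock
 * word (used by __raw_write_trylock and the can_lock macros below);
 * the helper names here are hypothetical and only show the field split.
 */
#if 0	/* sketch only, never compiled */
static inline unsigned int sketch_reader_count(raw_rwlock_t *rw)
{
	return rw->lock >> 8;		/* bits 31..8: reader counter */
}

static inline unsigned int sketch_wlock_byte(raw_rwlock_t *rw)
{
	return rw->lock & 0xff;		/* bits 7..0: writer lock byte */
}
#endif
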
/* The read/write lock slow paths live in out-of-line assembly helpers
 * (___rw_read_enter, ___rw_read_exit, ___rw_write_enter).  They are
 * entered with the lock pointer in %g1 and the ldstub result in %g2,
 * while %g4 preserves the caller's return address.
 */
static inline void __read_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define __raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __read_unlock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_read_exit\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define __raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___rw_write_enter\n\t"
	" ldstub [%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

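/* Try to grab the wlock byte with ldstub; if we got it but readers
 * still hold the counter, drop the byte again and report failure.
 */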
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
	}

	return (val == 0);
}

#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
#define __raw_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */