#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))
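
/*
 * Usage sketch (illustrative only, not part of this API; "my_lock" is a
 * hypothetical raw_spinlock_t): a helper that must only run with the lock
 * already held can document and check that precondition:
 *
 *	static void update_state_locked(void)
 *	{
 *		assert_raw_spin_locked(&my_lock);
 *		... modify data protected by my_lock ...
 *	}
 */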

void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
								__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
								__acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
								__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
								__releases(lock);
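
/*
 * Typical pairing at this layer (illustrative sketch only; "my_lock" is a
 * hypothetical raw_spinlock_t, and callers normally use the
 * raw_spin_lock_irqsave()/spin_lock_irqsave() wrappers from
 * <linux/spinlock.h> rather than these entry points directly):
 *
 *	unsigned long flags;
 *
 *	flags = _raw_spin_lock_irqsave(&my_lock);
 *	... critical section, interrupts disabled on this CPU ...
 *	_raw_spin_unlock_irqrestore(&my_lock, flags);
 */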

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
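
/*
 * The CONFIG_INLINE_SPIN_* options above (typically selected per
 * architecture, see kernel/Kconfig.locks) replace the out-of-line entry
 * points in kernel/spinlock.c with the __raw_* inline implementations
 * below. Illustrative call chain, assuming CONFIG_INLINE_SPIN_UNLOCK=y
 * and the raw_spin_unlock() wrapper from <linux/spinlock.h>:
 *
 *	raw_spin_unlock(&my_lock)  ->  _raw_spin_unlock(&my_lock)
 *	                           ->  __raw_spin_unlock(&my_lock)  (inlined)
 */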

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
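
/*
 * Trylock usage sketch (illustrative only; "my_lock" is hypothetical, and
 * callers normally go through the raw_spin_trylock()/spin_trylock()
 * wrappers): a non-zero return means the lock is now held and must be
 * released, zero means the protected data must not be touched.
 *
 *	if (_raw_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		_raw_spin_unlock(&my_lock);
 *	} else {
 *		... contended: back off or take a slow path ...
 *	}
 */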

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable in
	 * do_raw_spin_lock_flags(), because lockdep assumes that
	 * interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
	do_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
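
/*
 * Note on LOCK_CONTENDED() (from <linux/lockdep.h>): with CONFIG_LOCK_STAT
 * it roughly means "try the fast path, record the contention event, then
 * spin"; otherwise it reduces to the plain lock call. Rough sketch of the
 * lock-stat form, not the exact macro body:
 *
 *	if (!do_raw_spin_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		do_raw_spin_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 */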

static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
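
/*
 * BH-variant pairing sketch (illustrative; "my_lock" is hypothetical):
 * a successful trylock leaves softirqs disabled, so it must be paired
 * with a _bh unlock, while the failure path above has already re-enabled
 * them before returning 0.
 *
 *	if (_raw_spin_trylock_bh(&my_lock)) {
 *		... critical section, softirqs disabled ...
 *		_raw_spin_unlock_bh(&my_lock);
 *	}
 */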

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */