]>
Commit | Line | Data |
---|---|---|
fb1c8f93 IM |
1 | #ifndef __LINUX_BIT_SPINLOCK_H |
2 | #define __LINUX_BIT_SPINLOCK_H | |
3 | ||
626d6074 NP |
4 | #include <linux/kernel.h> |
5 | #include <linux/preempt.h> | |
60063497 | 6 | #include <linux/atomic.h> |
187f1882 | 7 | #include <linux/bug.h> |
626d6074 | 8 | |
fb1c8f93 IM |
/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 *
 * @bitnum: which bit of the word at @addr serves as the lock bit
 * @addr:   address of the word containing the lock bit
 *
 * Disables preemption while the lock is held; pair with bit_spin_unlock()
 * (or __bit_spin_unlock()) on the same bit/word.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* test_and_set_bit_lock() has acquire semantics on success. */
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		/*
		 * Re-enable preemption while spinning so the lock
		 * holder (or anything else) can run on this CPU.
		 */
		preempt_enable();
		do {
			cpu_relax();
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
	/* Sparse annotation: lock acquired (no code generated). */
	__acquire(bitlock);
}
36 | ||
37 | /* | |
38 | * Return true if it was acquired | |
39 | */ | |
40 | static inline int bit_spin_trylock(int bitnum, unsigned long *addr) | |
41 | { | |
42 | preempt_disable(); | |
43 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | |
b8dc93cb | 44 | if (unlikely(test_and_set_bit_lock(bitnum, addr))) { |
fb1c8f93 IM |
45 | preempt_enable(); |
46 | return 0; | |
47 | } | |
48 | #endif | |
49 | __acquire(bitlock); | |
50 | return 1; | |
51 | } | |
52 | ||
/*
 * bit-based spin_unlock()
 *
 * Releases the lock bit taken by bit_spin_lock()/bit_spin_trylock()
 * and re-enables preemption.
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Unlocking a lock we don't hold is a bug. */
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* Atomic clear with release semantics. */
	clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	/* Sparse annotation: lock released (no code generated). */
	__release(bitlock);
}
67 | ||
/*
 * bit-based spin_unlock()
 * non-atomic version, which can be used eg. if the bit lock itself is
 * protecting the rest of the flags in the word.
 *
 * Only safe when no other modification of the word can race with the
 * clear — i.e. the lock bit guards all concurrent writers of *addr.
 */
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* Unlocking a lock we don't hold is a bug. */
	BUG_ON(!test_bit(bitnum, addr));
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* Non-atomic clear, still with release ordering. */
	__clear_bit_unlock(bitnum, addr);
#endif
	preempt_enable();
	/* Sparse annotation: lock released (no code generated). */
	__release(bitlock);
}
84 | ||
/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	/* The lock bit itself records ownership. */
	return test_bit(bitnum, addr);
#elif defined(CONFIG_PREEMPT_COUNT)
	/* UP: any holder must have preemption disabled. */
	return preempt_count();
#else
	/* UP without preempt counting: cannot tell, assume held. */
	return 1;
#endif
}
98 | ||
99 | #endif /* __LINUX_BIT_SPINLOCK_H */ | |
100 |