/*
 * include/asm-sh/spinlock.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H

#include <asm/atomic.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while (__raw_spin_is_locked(x))
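
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * __raw_spin_unlock_wait() spins until the current holder releases the
 * lock, without acquiring it, e.g.:
 *
 *	// Wait until nobody holds obj->lock before tearing obj down;
 *	// "obj" and free_object() are made up for the example.
 *	__raw_spin_unlock_wait(&obj->lock);
 *	free_object(obj);
 */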

/*
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	/*
	 * tas.b reads the byte at the operand address, sets the T flag
	 * if that byte was zero, and unconditionally sets its top bit.
	 * bf/s branches back to 1: (nop in the delay slot) while T is
	 * clear, i.e. while the lock byte was already non-zero and the
	 * lock is held by someone else.
	 *
	 * The lock word is only touched through the address operand, so
	 * no output operand is needed; the "memory" clobber stops the
	 * compiler from caching the lock value across the acquire.
	 */
	__asm__ __volatile__ (
		"1:\n\t"
		"tas.b @%0\n\t"
		"bf/s 1b\n\t"
		"nop\n\t"
		: /* no outputs */
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	assert_spin_locked(lock);

	lock->lock = 0;
}

#define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
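
/*
 * Minimal usage sketch (illustrative only; callers normally go through
 * the generic spin_lock()/spin_trylock() wrappers rather than the __raw_
 * primitives, and "my_lock" is hypothetical):
 *
 *	if (!__raw_spin_trylock(&my_lock))	// fast path: grab if free
 *		__raw_spin_lock(&my_lock);	// otherwise spin for it
 *	... critical section ...
 *	__raw_spin_unlock(&my_lock);
 */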

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
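
/*
 * Sketch of the "mixed" pattern described above (illustrative; names are
 * hypothetical, and real code uses the generic rwlock wrappers):
 *
 *	// Writer, process context: disable interrupts so an interrupt
 *	// reader on this CPU cannot come in on top of the held write-lock.
 *	write_lock_irqsave(&my_rwlock, flags);
 *	... update the data ...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 *
 *	// Reader, interrupt context: a plain read_lock() is sufficient,
 *	// because writers never run in interrupt context.
 *	read_lock(&my_rwlock);
 *	... read the data ...
 *	read_unlock(&my_rwlock);
 */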

/*
 * rw->lock guards rw->counter: readers hold the guard just long enough
 * to adjust the reader count, while a writer holds it across the whole
 * write-side critical section.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	atomic_inc(&rw->counter);

	__raw_spin_unlock(&rw->lock);
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);

	atomic_dec(&rw->counter);

	__raw_spin_unlock(&rw->lock);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	/* Drop and retake the guard until no readers are in flight. */
	__raw_spin_lock(&rw->lock);
	while (atomic_read(&rw->counter) > 0) {
		__raw_spin_unlock(&rw->lock);
		cpu_relax();
		__raw_spin_lock(&rw->lock);
	}
	atomic_set(&rw->counter, -1);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	atomic_set(&rw->counter, 0);
	__raw_spin_unlock(&rw->lock);
}

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	/* Succeed only if the guard is free and there are no readers. */
	if (!__raw_spin_trylock(&rw->lock))
		return 0;

	if (atomic_read(&rw->counter) != 0) {
		__raw_spin_unlock(&rw->lock);
		return 0;
	}

	atomic_set(&rw->counter, -1);

	return 1;
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SH_SPINLOCK_H */