/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells ([email protected]).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <linux/atomic.h>

struct rw_semaphore;

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#include <linux/rwsem-spinlock.h> /* use a generic implementation */
#else
/* All arch specific implementations share the same struct */
struct rw_semaphore {
        long                    count;
        raw_spinlock_t          wait_lock;
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/* Include the arch specific part */
#include <asm/rwsem.h>

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return sem->count != 0;
}

#endif

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)                               \
        { RWSEM_UNLOCKED_VALUE,                                 \
          __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),             \
          LIST_HEAD_INIT((name).wait_list)                      \
          __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                         \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __init_rwsem((sem), #sem, &__key);                      \
} while (0)

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/lockdep-design.txt for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
#else
# define down_read_nested(sem, subclass)        down_read(sem)
# define down_write_nested(sem, subclass)       down_write(sem)
#endif

#endif /* _LINUX_RWSEM_H */
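Finally, a hedged sketch of the _nested() annotation described in the comment above, again using the hypothetical example_object type; SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>, and callers are assumed to always lock the two objects in the same order:

static void example_lock_pair(struct example_object *first,
                              struct example_object *second)
{
        down_write(&first->sem);
        /* Same lock class, second instance: annotate the nesting for lockdep. */
        down_write_nested(&second->sem, SINGLE_DEPTH_NESTING);
}

static void example_unlock_pair(struct example_object *first,
                                struct example_object *second)
{
        up_write(&second->sem);
        up_write(&first->sem);
}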