/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells ([email protected]).
 * - Derived partially from idea by Andrea Arcangeli <[email protected]>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>

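/*
 * In this spinlock-based implementation, sem->activity holds the lock
 * state: 0 means the semaphore is unlocked, a positive value is the
 * number of readers currently holding it, and -1 means a writer holds
 * it.  All updates to it are made under sem->wait_lock.
 */
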
enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

        /*
         * Only peek at the state if wait_lock can be taken without
         * spinning; otherwise conservatively report the semaphore as
         * locked.
         */
        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->activity != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->activity = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wakewrite)
                        /* Wake up a writer. Note that we do not grant it the
                         * lock - it will have to acquire it when it runs. */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
        woken = 0;
        do {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                /*
                 * Make sure the list removal is complete before clearing
                 * waiter->task: the sleeping reader treats a NULL task as
                 * "lock granted" and may then release its stack-allocated
                 * waiter at any moment.
                 */
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (next == &sem->wait_list)
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        sem->activity += woken;

 out:
        return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        wake_up_process(waiter->task);

        return sem;
}

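/*
 * Note that the writer woken above is not granted the lock here: it
 * must retake sem->wait_lock in __down_write_nested() and re-check
 * sem->activity itself, which is what allows an already-running task
 * to steal the write lock ahead of it.
 */
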
/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        /*
         * wait to be given the lock: __rwsem_do_wake() clears
         * waiter.task once the read lock has been granted on our
         * behalf, so that is the condition to wait for.
         */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;
 out:
        ;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * get a write lock on the semaphore
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        tsk = current;
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * That is the key to support write lock stealing: it
                 * allows the task already on the CPU to take the lock
                 * as soon as it becomes free, rather than putting itself
                 * to sleep and waiting to be woken even though another
                 * task may be at the head of the wait list.
                 */
                if (sem->activity == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->activity = -1;
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
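
/*
 * The subclass argument is not used by this implementation; lockdep's
 * nesting annotation for down_write_nested() is applied by the caller
 * before this function is invoked.
 */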
void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity == 0) {
                /* got the lock */
                sem->activity = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * release a read lock on the semaphore
 */
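/*
 * If the last reader is dropping the lock and the wait list is not
 * empty, the waiter at its head must be a writer: readers only queue
 * behind an already-queued waiter or while a writer holds the
 * semaphore, and queued readers are dequeued in a batch whenever a
 * write lock is released or downgraded.  Waking a single writer is
 * therefore all that __up_read() needs to do.
 */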
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 * - writers are not woken (wakewrite is 0): the caller keeps holding a
 *   read lock, so a writer could not take the semaphore anyway
 */
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
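
/*
 * Illustrative sketch only (not part of this file): how a caller might
 * use the rwsem API built on top of the functions above.  The names
 * my_sem, my_data, reader() and writer() are invented for this example;
 * down_read(), up_read(), down_write() and downgrade_write() are the
 * normal kernel entry points.
 *
 *	static DECLARE_RWSEM(my_sem);
 *	static int my_data;
 *
 *	static int reader(void)
 *	{
 *		int val;
 *
 *		down_read(&my_sem);
 *		val = my_data;
 *		up_read(&my_sem);
 *		return val;
 *	}
 *
 *	static void writer(int val)
 *	{
 *		down_write(&my_sem);
 *		my_data = val;
 *		downgrade_write(&my_sem);
 *		up_read(&my_sem);
 *	}
 */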