/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells ([email protected]).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <[email protected]>
 * and Michel Lespinasse <[email protected]>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

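/*
 * Usage sketch (editor's illustration, not part of the original file): the
 * slow paths below back the public rwsem API declared in <linux/rwsem.h>,
 * and callers never invoke them directly.  A typical user looks roughly like:
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 *      down_read(&my_sem);             shared access, many readers may hold it
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);            exclusive access
 *      downgrade_write(&my_sem);       keep holding it, but as a read lock
 *      up_read(&my_sem);
 *
 * The uncontended fast paths live in the architecture rwsem headers; only
 * contended acquisitions and releases reach the functions in this file.
 */
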
enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};

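/*
 * Reference for the count arithmetic below (editor's note; the values are the
 * usual 32-bit definitions from the arch rwsem header and are illustrative):
 *
 *      RWSEM_UNLOCKED_VALUE            0x00000000
 *      RWSEM_ACTIVE_BIAS               0x00000001
 *      RWSEM_ACTIVE_MASK               0x0000ffff
 *      RWSEM_WAITING_BIAS              (-0x00010000)
 *      RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
 *      RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * So, for example, count == 0x00000003 means three active readers and an
 * empty wait list, while count == RWSEM_WAITING_BIAS means no active lockers
 * but at least one queued waiter.
 */
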
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wake_type is RWSEM_WAKE_ANY
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        long oldcount, woken, loop, adjustment;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY)
                        /* Wake writer at the front of the queue, but do not
                         * grant it the lock yet as we want other writers
                         * to be able to steal it.  Readers, on the other hand,
                         * will block as they will notice the queued writer.
                         */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        adjustment = 0;
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /* A writer stole the lock. Undo our reader grant. */
                        if (rwsem_atomic_update(-adjustment, sem) &
                                                RWSEM_ACTIVE_MASK)
                                goto out;
                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
        }

        /* Grant an infinite number of read locks to the readers at the front
         * of the queue.  Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
         */
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                        struct rwsem_waiter, list);

        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;

        if (adjustment)
                rwsem_atomic_add(adjustment, sem);

        next = sem->wait_list.next;
        loop = woken;
        do {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        } while (--loop);

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;
}

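/*
 * Worked example for the reader-grant arithmetic above (editor's
 * illustration, using the 32-bit bias values listed earlier): suppose
 * up_write() finds two readers queued and nothing behind them.
 * __rwsem_do_wake() first applies the speculative grant,
 * count += RWSEM_ACTIVE_READ_BIAS.  The loop then counts woken = 2 and hits
 * the end of the list, so
 *
 *      adjustment = 2 * RWSEM_ACTIVE_READ_BIAS - RWSEM_ACTIVE_READ_BIAS
 *                   - RWSEM_WAITING_BIAS
 *                 = RWSEM_ACTIVE_READ_BIAS + 0x00010000
 *
 * i.e. the second reader becomes active and the waiting bias is removed,
 * leaving count with two active readers and an empty wait list.
 */
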
/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers !
         */
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }

        tsk->state = TASK_RUNNING;

        return sem;
}

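/*
 * Note (added for clarity): a reader never removes its own waiter from the
 * list.  __rwsem_do_wake() dequeues the woken readers and clears waiter->task
 * after an smp_mb(), which is exactly the condition the loop above waits for;
 * the matching get_task_struct()/put_task_struct() keep the task pinned until
 * the waker is done with it.  Writers, by contrast, stay queued when woken
 * and remove themselves in rwsem_down_write_failed() once they own the lock.
 */
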
/*
 * wait until we successfully acquire the write lock
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there were already threads queued before us and there are no
         * active writers, the lock must be read owned; so we try to wake
         * any read locks that were queued ahead of us. */
        if (count > RWSEM_WAITING_BIAS &&
            adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

        /* wait until we successfully acquire the lock */
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        while (true) {
                if (!(count & RWSEM_ACTIVE_MASK)) {
                        /* Try acquiring the write lock. */
                        count = RWSEM_ACTIVE_WRITE_BIAS;
                        if (!list_is_singular(&sem->wait_list))
                                count += RWSEM_WAITING_BIAS;

                        if (sem->count == RWSEM_WAITING_BIAS &&
                            cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
                                                        RWSEM_WAITING_BIAS)
                                break;
                }

                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        schedule();
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                } while ((count = sem->count) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }

        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
        tsk->state = TASK_RUNNING;

        return sem;
}

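/*
 * Note (added for clarity): this is where the "writer lock-stealing" from the
 * file header shows up.  __rwsem_do_wake() wakes the writer at the head of
 * the queue without transferring ownership, so the woken writer still has to
 * win the cmpxchg() above; any other writer sitting in this slow path may win
 * it instead, in which case the woken writer simply goes back to sleep in the
 * loop until the active part of the count drops to zero again.
 */
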
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);