// SPDX-License-Identifier: GPL-2.0
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but enforces
 * an alternate policy, namely:
 * 1) Supports lock wait timeouts
 * 2) Write waiter has priority
 * 3) Downgrading is not supported
 *
 * Implementation notes:
 * 1) Upper half of semaphore count is a wait count (differs from rwsem
 *    in that rwsem normalizes the upper half to the wait bias)
 * 2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 *
 * This implementation incorporates write lock stealing.
 */
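/*
 * A minimal usage sketch (illustrative only; the real callers live in
 * drivers/tty/tty_ldisc.c and take tty->ldisc_sem):
 *
 *	if (ldsem_down_read(sem, msecs_to_jiffies(100))) {
 *		...				// read side held
 *		ldsem_up_read(sem);
 *	} else {
 *		...				// timed out; lock not held
 *	}
 *
 * A writer uses ldsem_down_write()/ldsem_up_write() the same way and is
 * preferred over new readers while it waits.
 */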
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK	0xffffffffL
#else
# define LDSEM_ACTIVE_MASK	0x0000ffffL
#endif

#define LDSEM_UNLOCKED		0L
#define LDSEM_ACTIVE_BIAS	1L
#define LDSEM_WAIT_BIAS		(-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS		LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS	(LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
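/*
 * Worked example of the count layout (32-bit values, illustrative):
 *
 *	0x00000000	unlocked
 *	0x00000003	three readers active
 *	0xffff0001	one writer active (LDSEM_WRITE_BIAS), or one reader
 *			active plus one writer waiting; in both cases the
 *			active part (count & LDSEM_ACTIVE_MASK) is 1 and the
 *			upper (wait) half counts -1 waiter
 *	0xfffe0001	one reader active, two tasks waiting
 *
 * The count alone cannot tell a waiting writer from a waiting reader;
 * the wait lists carry that distinction.
 */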
struct ldsem_waiter {
	struct list_head list;
	struct task_struct *task;
};
/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, LDSEM_UNLOCKED);
	sem->wait_readers = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->read_wait);
	INIT_LIST_HEAD(&sem->write_wait);
}
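/*
 * A sketch of direct initialization (illustrative; callers normally go
 * through a wrapper macro that supplies a static lockdep class key):
 *
 *	static struct ld_semaphore sem;
 *	static struct lock_class_key key;
 *
 *	__init_ldsem(&sem, "my_ldsem", &key);
 */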
static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter, *next;
	struct task_struct *tsk;
	long adjust, count;

	/*
	 * Try to grant read locks to all readers on the read wait list.
	 * Note the 'active part' of the count is incremented by
	 * the number of readers before waking any processes up.
	 */
	adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
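	/*
	 * Illustrative arithmetic (32-bit): each queued reader contributed
	 * LDSEM_WAIT_BIAS (-0x10000) when it blocked; granting it the lock
	 * removes that and adds LDSEM_ACTIVE_BIAS (+1), i.e. +0x10001 per
	 * reader, so N waiting readers adjust the count by N * 0x10001.
	 */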
	count = atomic_long_add_return(adjust, &sem->count);
	do {
		if (count > 0)
			break;
		/* count is still negative: a writer got there first, so
		 * back out the read grants and leave the readers queued */
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - adjust))
			return;
	} while (1);
	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
		tsk = waiter->task;
		smp_store_release(&waiter->task, NULL);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	INIT_LIST_HEAD(&sem->read_wait);
	sem->wait_readers = 0;
}
static inline int writer_trylock(struct ld_semaphore *sem)
{
	/*
	 * Only wake this writer if the active part of the count can be
	 * transitioned from 0 -> 1
	 */
	long count = atomic_long_add_return(LDSEM_ACTIVE_BIAS, &sem->count);
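	/*
	 * Illustrative: if a reader is still active, the add above makes
	 * the active part 2 rather than 1, the test below fails, and the
	 * cmpxchg backs the bias out so the writer keeps waiting.
	 */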
	do {
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
			return 1;
		if (atomic_long_try_cmpxchg(&sem->count, &count, count - LDSEM_ACTIVE_BIAS))
			return 0;
	} while (1);
}
static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
	struct ldsem_waiter *waiter;

	waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
	wake_up_process(waiter->task);
}
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
	if (!list_empty(&sem->write_wait))
		__ldsem_wake_writer(sem);
	else if (!list_empty(&sem->read_wait))
		__ldsem_wake_readers(sem);
}
static void ldsem_wake(struct ld_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);
	__ldsem_wake(sem);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);
	/*
	 * Try to reverse the lock attempt, but if the count has changed
	 * so that reversing fails, check if there are no waiters (count > 0,
	 * i.e. the read lock was in fact acquired), and early-out if so
	 */
	do {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust)) {
			count += adjust;
			break;
		}
		if (count > 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->read_wait);
	sem->wait_readers++;
	waiter.task = current;
	get_task_struct(current);

	/* if there are no active locks, wake the new lock owner(s) */
	if ((count & LDSEM_ACTIVE_MASK) == 0)
		__ldsem_wake(sem);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* the waker clears waiter.task when granting the lock */
		if (!smp_load_acquire(&waiter.task))
			break;
		if (!timeout)
			break;
		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	if (!timeout) {
		/*
		 * Lock timed out but check if this task was just
		 * granted lock ownership - if so, pretend there
		 * was no timeout; otherwise, cleanup lock wait.
		 */
		raw_spin_lock_irq(&sem->wait_lock);
		if (waiter.task) {
			atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
			sem->wait_readers--;
			list_del(&waiter.list);
			raw_spin_unlock_irq(&sem->wait_lock);
			put_task_struct(waiter.task);
			return NULL;
		}
		raw_spin_unlock_irq(&sem->wait_lock);
	}
	return sem;
}
/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
	struct ldsem_waiter waiter;
	long adjust = -LDSEM_ACTIVE_BIAS;
	int locked = 0;

	/* set up my own style of waitqueue */
	raw_spin_lock_irq(&sem->wait_lock);
	/*
	 * Try to reverse the lock attempt, but if the count has changed
	 * so that reversing fails, check if the lock is now owned,
	 * and early-out if so.
	 */
	do {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + adjust))
			break;
		if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
	} while (1);

	list_add_tail(&waiter.list, &sem->write_wait);
	waiter.task = current;

	set_current_state(TASK_UNINTERRUPTIBLE);
	for (;;) {
		if (!timeout)
			break;
		raw_spin_unlock_irq(&sem->wait_lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->wait_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		locked = writer_trylock(sem);
		if (locked)
			break;
	}

	if (!locked)
		atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
	list_del(&waiter.list);

	/*
	 * In case of timeout, wake up every reader that yielded to this
	 * writer. Otherwise the readers would be split into two groups:
	 * one holding the semaphore and another still sleeping, even
	 * though there is no longer a writer contending with them.
	 */
	if (!locked && list_empty(&sem->write_wait))
		__ldsem_wake_readers(sem);

	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);

	/* lock wait may have timed out */
	if (!locked)
		return NULL;
	return sem;
}
static int __ldsem_down_read_nested(struct ld_semaphore *sem,
				    int subclass, long timeout)
{
	long count;

	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	count = atomic_long_add_return(LDSEM_READ_BIAS, &sem->count);
	if (count <= 0) {
		lock_contended(&sem->dep_map, _RET_IP_);
		if (!down_read_failed(sem, count, timeout)) {
			rwsem_release(&sem->dep_map, 1, _RET_IP_);
			return 0;
		}
	}
	lock_acquired(&sem->dep_map, _RET_IP_);
	return 1;
}
static int __ldsem_down_write_nested(struct ld_semaphore *sem,
				     int subclass, long timeout)
{
	long count;

	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);

	count = atomic_long_add_return(LDSEM_WRITE_BIAS, &sem->count);
	if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
		lock_contended(&sem->dep_map, _RET_IP_);
		if (!down_write_failed(sem, count, timeout)) {
			rwsem_release(&sem->dep_map, 1, _RET_IP_);
			return 0;
		}
	}
	lock_acquired(&sem->dep_map, _RET_IP_);
	return 1;
}
/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, 0, timeout);
}
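/*
 * Example (a sketch; the tty core takes tty->ldisc_sem this way from
 * drivers/tty/tty_ldisc.c):
 *
 *	if (!ldsem_down_read(sem, msecs_to_jiffies(500)))
 *		return -EBUSY;		// timed out, read lock not held
 *	...				// read side held
 *	ldsem_up_read(sem);
 */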
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (count >= 0) {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_READ_BIAS)) {
			rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
			lock_acquired(&sem->dep_map, _RET_IP_);
			return 1;
		}
	}
	return 0;
}
/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, 0, timeout);
}
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while ((count & LDSEM_ACTIVE_MASK) == 0) {
		if (atomic_long_try_cmpxchg(&sem->count, &count, count + LDSEM_WRITE_BIAS)) {
			rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
			lock_acquired(&sem->dep_map, _RET_IP_);
			return 1;
		}
	}
	return 0;
}
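/*
 * The trylocks never sleep, so they suit paths where blocking is not an
 * option (a sketch):
 *
 *	if (!ldsem_down_write_trylock(sem))
 *		return -EAGAIN;		// contended, try again later
 *	...				// exclusive access
 *	ldsem_up_write(sem);
 */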
/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
	long count;

	rwsem_release(&sem->dep_map, 1, _RET_IP_);

	count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count);
	/* wake waiters if this was the last active reader and any are queued */
	if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
		ldsem_wake(sem);
}
/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
	long count;

	rwsem_release(&sem->dep_map, 1, _RET_IP_);

	count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count);
	/* a negative count means tasks are still waiting */
	if (count < 0)
		ldsem_wake(sem);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
	might_sleep();
	return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
			    long timeout)
{
	might_sleep();
	return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif