// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
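
/*
 * Illustrative sketch (not part of the kernel implementation): because a
 * task_struct pointer is aligned to at least L1_CACHE_BYTES, its low three
 * bits are always zero, so the owner pointer and the flags can share one
 * word and be updated with a single atomic operation. In portable C the
 * packing looks roughly like:
 *
 *	#include <stdint.h>
 *
 *	#define FLAGS_MASK 0x07UL
 *
 *	static inline uintptr_t pack(void *task, unsigned long flags)
 *	{
 *		return (uintptr_t)task | (flags & FLAGS_MASK);
 *	}
 *
 *	static inline void *unpack_task(uintptr_t v)
 *	{
 *		return (void *)(v & ~FLAGS_MASK);
 *	}
 *
 *	static inline unsigned long unpack_flags(uintptr_t v)
 *	{
 *		return v & FLAGS_MASK;
 *	}
 *
 * pack()/unpack_task()/unpack_flags() are hypothetical names; the kernel
 * equivalents are the __owner_task()/__owner_flags() helpers below.
 */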

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

__must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	if (unlikely(__mutex_owner(lock) == current))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock_recursive);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif
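
/*
 * Illustrative sketch (not kernel code) of the fastpath protocol above,
 * written with C11 atomics; 'mtx' and 'self' are hypothetical stand-ins
 * for lock->owner and current:
 *
 *	#include <stdatomic.h>
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool lock_fast(_Atomic uintptr_t *mtx, uintptr_t self)
 *	{
 *		uintptr_t zero = 0;
 *
 *		// 0 -> self transition; ACQUIRE pairs with the
 *		// RELEASE in unlock_fast().
 *		return atomic_compare_exchange_strong_explicit(mtx,
 *				&zero, self,
 *				memory_order_acquire, memory_order_relaxed);
 *	}
 *
 *	static bool unlock_fast(_Atomic uintptr_t *mtx, uintptr_t self)
 *	{
 *		uintptr_t expected = self;
 *
 *		// self -> 0 transition; fails if any flag bits are set,
 *		// which routes us into the slowpath.
 *		return atomic_compare_exchange_strong_explicit(mtx,
 *				&expected, 0,
 *				memory_order_release, memory_order_relaxed);
 *	}
 *
 * The real fastpaths behave the same way: any nonzero owner word (an owner
 * task and/or flag bits) forces the slowpath.
 */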

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
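
/*
 * Minimal usage sketch; 'demo_lock' and 'demo_count' are hypothetical:
 *
 *	static DEFINE_MUTEX(demo_lock);
 *	static int demo_count;
 *
 *	static void demo_inc(void)
 *	{
 *		mutex_lock(&demo_lock);		// may sleep; process context only
 *		demo_count++;			// critical section
 *		mutex_unlock(&demo_lock);	// same task must unlock
 *	}
 */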

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
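
/*
 * A sketch of the two deadlock-avoidance rules above, with older == smaller
 * stamp. This is a simplification of the real code below (see
 * __ww_mutex_die() and __ww_mutex_wound()); the function name is made up:
 *
 *	// What happens when 'waiter' requests a lock 'holder' owns.
 *	enum outcome { WAIT, DIE, WOUND_HOLDER };
 *
 *	static enum outcome resolve(unsigned long waiter_stamp,
 *				    unsigned long holder_stamp,
 *				    bool wait_die)
 *	{
 *		if (wait_die)
 *			// Wait-Die: younger requesters back off.
 *			return waiter_stamp > holder_stamp ? DIE : WAIT;
 *
 *		// Wound-Wait: younger holders are preempted.
 *		return waiter_stamp < holder_stamp ? WOUND_HOLDER : WAIT;
 *	}
 *
 * In both schemes the oldest transaction always makes progress, which is
 * what rules out both deadlock and starvation.
 */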

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}
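
/*
 * The subtraction-and-cast idiom above is wraparound-safe, in the same way
 * as time_after(). For example, with hypothetical 8-bit stamps:
 *
 *	a->stamp = 2, b->stamp = 254	(b allocated first, counter wrapped)
 *	(signed char)(2 - 254) == 4 > 0
 *
 * so @a is still correctly seen as younger (after) @b, even though its raw
 * stamp value is numerically smaller.
 */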

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx; if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS	[R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by acquiring
	 * wait_lock is there a guarantee that they are valid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that lock->owner still matches owner. If that
		 * fails, owner might point to freed memory. If it still
		 * matches, the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To guard against lock holder preemption, skip spinning if the owner
	 * task is not running on a CPU, or if its (v)CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
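
/*
 * The structure of the spin-vs-sleep decision above, reduced to a sketch
 * (try_acquire() and owner_is_running() are hypothetical helpers, not the
 * kernel API):
 *
 *	for (;;) {
 *		if (try_acquire(lock))
 *			return true;		// got it while spinning
 *
 *		if (!owner_is_running(lock) || need_resched())
 *			return false;		// fall back to sleeping
 *
 *		cpu_relax();			// spin politely
 *	}
 *
 * The point of the OSQ is that only the queue head runs this loop against
 * the mutex itself; everyone else spins locally on its own OSQ node, which
 * keeps the owner field's cache line from bouncing between CPUs.
 */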
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifdef CONFIG_DEBUG_MUTEXES
	WARN_ON(in_interrupt());
#endif
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourselves.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
 * smallest first, such that older contexts are preferred to acquire the
 * lock over younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might make progress.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (use_ww_ctx && ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
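
/*
 * The interval update above multiplies by 3.5 using integer arithmetic
 * (tmp*2 + tmp + tmp/2), so successive injection intervals grow roughly
 * geometrically, e.g. 1 -> 3 -> 10 -> 35 -> 122 -> ..., and saturate at
 * UINT_MAX once tmp exceeds UINT_MAX/4, which avoids overflowing the
 * multiplication.
 */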

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif
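
/*
 * Illustrative usage sketch for the ww_mutex API (see also
 * Documentation/locking/ww-mutex-design.rst for the canonical patterns).
 * The ww_class, the 'buf' type and lock_pair() are hypothetical:
 *
 *	static DEFINE_WW_CLASS(demo_ww_class);
 *
 *	struct buf {
 *		struct ww_mutex lock;
 *	};
 *
 *	static int lock_pair(struct buf *a, struct buf *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *		int ret;
 *
 *		ww_acquire_init(&ctx, &demo_ww_class);
 *
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *		if (ret)
 *			goto out;
 *
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *		if (ret == -EDEADLK) {
 *			// back off: drop a, sleep-wait on b, retake a
 *			ww_mutex_unlock(&a->lock);
 *			ww_mutex_lock_slow(&b->lock, &ctx);
 *			ret = ww_mutex_lock(&a->lock, &ctx);
 *		}
 *		if (ret)
 *			goto out;
 *
 *		ww_acquire_done(&ctx);
 *		// ... use a and b ...
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_unlock(&b->lock);
 *	out:
 *		ww_acquire_fini(&ctx);
 *		return ret;
 *	}
 *
 * A production caller would loop on -EDEADLK until all locks are held; this
 * sketch shows a single back-off round only.
 */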

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
	WARN_ON(in_interrupt());
#endif

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
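
/*
 * Usage sketch illustrating the spin_trylock()-style convention noted
 * above ('demo_lock' and do_work() are hypothetical):
 *
 *	if (mutex_trylock(&demo_lock)) {
 *		// 1 == acquired; unlike down_trylock(), nonzero means success
 *		do_work();
 *		mutex_unlock(&demo_lock);
 *	} else {
 *		// 0 == contended; do not unlock here
 *	}
 */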

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return 1 and hold @lock if we dec to 0, return 0 otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
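
/*
 * Typical teardown pattern (sketch; the 'demo' names are hypothetical):
 * drop a reference, and only if it hit zero, destroy the object while
 * holding the list lock that keeps lookups race-free:
 *
 *	static void demo_put(struct demo_obj *obj)
 *	{
 *		if (atomic_dec_and_mutex_lock(&obj->refcnt, &demo_list_lock)) {
 *			list_del(&obj->node);
 *			mutex_unlock(&demo_list_lock);
 *			kfree(obj);
 *		}
 *	}
 *
 * The fast path (refcount provably > 1) never touches the mutex at all.
 */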