// SPDX-License-Identifier: GPL-2.0-or-later
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
#include <linux/compat.h>
#include <linux/jhash.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/plist.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/slab.h>
#include "../locking/rtmutex_common.h"
 * The base of the bucket array and its size are always used together
 * (after initialization only in futex_hash()), so ensure that they
 * reside in the same cacheline.
struct futex_hash_bucket *queues;
unsigned long hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)
 * Fault injections for futexes.
#ifdef CONFIG_FAIL_FUTEX
struct fault_attr attr;
.attr = FAULT_ATTR_INITIALIZER,
.ignore_private = false,
static int __init setup_fail_futex(char *str)
return setup_fault_attr(&fail_futex.attr, str);
__setup("fail_futex=", setup_fail_futex);
bool should_fail_futex(bool fshared)
if (fail_futex.ignore_private && !fshared)
return should_fail(&fail_futex.attr, 1);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init fail_futex_debugfs(void)
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
dir = fault_create_debugfs_attr("fail_futex", NULL,
debugfs_create_bool("ignore-private", mode, dir,
&fail_futex.ignore_private);
late_initcall(fail_futex_debugfs);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
#endif /* CONFIG_FAIL_FUTEX */
 * futex_hash - Return the hash bucket in the global hash
 * @key: Pointer to the futex key for which the hash is calculated
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
struct futex_hash_bucket *futex_hash(union futex_key *key)
u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
return &futex_queues[hash & (futex_hashsize - 1)];
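/*
 * Illustrative sketch (not part of this file; the helper name is made up):
 * indexing with "hash & (size - 1)" only selects a valid bucket when size
 * is a power of two, which is why futex_init() below rounds the hash size
 * up with roundup_pow_of_two().
 */
static inline unsigned long example_bucket_index(u32 hash, unsigned long hashsize)
{
	/* hashsize is assumed to be 2^n, so (hashsize - 1) is an all-ones mask */
	return hash & (hashsize - 1);
}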
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time: ptr to the given timeout value
 * @timeout: the hrtimer_sleeper structure to be set up
 * @flags: futex flags
 * @range_ns: optional range in ns
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
int flags, u64 range_ns)
hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ?
CLOCK_REALTIME : CLOCK_MONOTONIC,
 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
 * effectively the same as calling hrtimer_set_expires().
hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);
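/*
 * A minimal usage sketch, loosely modelled on how the futex wait paths use
 * this helper (the function name and body below are illustrative, not code
 * from this file): set the timer up, block, then cancel and destroy the
 * on-stack timer.
 */
static void example_timed_wait(ktime_t *abs_time, int flags)
{
	struct hrtimer_sleeper timeout, *to;

	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);

	/* ... queue the waiter and block, e.g. futex_queue() + schedule() ... */

	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
}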
 * Generate a machine wide unique identifier for this inode.
 * This relies on u64 not wrapping in the lifetime of the machine, which with
 * 1ns resolution means almost 585 years.
 * This further relies on the fact that a well formed program will not unmap
 * the file while it has a (shared) futex waiting on it. This mapping will have
 * a file reference which pins the mount and inode.
 * If for some reason an inode gets evicted and read back in again, it will get
 * a new sequence number and will _NOT_ match, even though it is the exact same
 * file.
 * It is important that futex_match() will never have a false-positive, esp.
 * for PI futexes that can mess up the state. The above argues that false-negatives
 * are only possible for malformed programs.
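/*
 * Worked check of the "almost 585 years" claim above: a u64 nanosecond
 * counter wraps after 2^64 ns = 18,446,744,073,709,551,616 ns
 * ~= 1.845e10 s ~= 1.845e10 / (86400 * 365) ~= 584.9 years.
 */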
static u64 get_inode_sequence_number(struct inode *inode)
static atomic64_t i_seq;
/* Does the inode already have a sequence number? */
old = atomic64_read(&inode->i_sequence);
u64 new = atomic64_add_return(1, &i_seq);
if (WARN_ON_ONCE(!new))
old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr: virtual address of the futex
 * @key: address where result is stored.
 * @rw: mapping needs to be read/write (values: FUTEX_READ,
 * Return: a negative error code or 0
 * The key words are stored in @key on success.
 * For shared mappings (when @fshared), the key is:
 *   ( inode->i_sequence, page->index, offset_within_page )
 * [ also see get_inode_sequence_number() ]
 * For private mappings (or when !@fshared), the key is:
 *   ( current->mm, address, 0 )
 * This allows (cross process, where applicable) identification of the futex
 * without keeping the page pinned for the duration of the FUTEX_WAIT.
 * lock_page() might sleep, the caller should not hold a spinlock.
int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key,
enum futex_access rw)
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct address_space *mapping;
fshared = flags & FLAGS_SHARED;
 * The futex address must be "naturally" aligned.
key->both.offset = address % PAGE_SIZE;
if (unlikely((address % sizeof(u32)) != 0))
address -= key->both.offset;
if (unlikely(!access_ok(uaddr, sizeof(u32))))
if (unlikely(should_fail_futex(fshared)))
 * PROCESS_PRIVATE futexes are fast.
 * As the mm cannot disappear under us and the 'key' only needs the
 * virtual address, we don't even have to find the underlying vma.
 * Note: We do have to check that 'uaddr' is a valid user address,
 * but access_ok() should be faster than find_vma().
 * On no-MMU, shared futexes are treated as private, therefore
 * we must not include the current process in the key. Since
 * there is only one address space, the address is a unique key
if (IS_ENABLED(CONFIG_MMU))
key->private.mm = mm;
else
key->private.mm = NULL;
key->private.address = address;
/* Ignore any VERIFY_READ mapping (futex common case) */
if (unlikely(should_fail_futex(true)))
err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
 * If write access is not required (e.g. FUTEX_WAIT), try
 * and get read-only access.
if (err == -EFAULT && rw == FUTEX_READ) {
err = get_user_pages_fast(address, 1, 0, &page);
 * The treatment of mapping from this point on is critical. The folio
 * lock protects many things but in this context the folio lock
 * stabilizes mapping, prevents inode freeing in the shared
 * file-backed region case and guards against movement to swap cache.
 * Strictly speaking the folio lock is not needed in all cases being
 * considered here and the folio lock forces unnecessary serialization.
 * From this point on, mapping will be re-verified if necessary and
 * folio lock will be acquired only if it is unavoidable
 * Mapping checks require the folio so it is looked up now. For
 * anonymous pages, it does not matter if the folio is split
 * in the future as the key is based on the address. For
 * filesystem-backed pages, the precise page is required as the
 * index of the page determines the key.
folio = page_folio(page);
mapping = READ_ONCE(folio->mapping);
 * If folio->mapping is NULL, then it cannot be an anonymous
 * page; but it might be the ZERO_PAGE or in the gate area or
 * in a special mapping (all cases which we are happy to fail);
 * or it may have been a good file page when get_user_pages_fast
 * found it, but truncated or holepunched or subjected to
 * invalidate_complete_page2 before we got the folio lock (also
 * cases which we are happy to fail). And we hold a reference,
 * so refcount care in invalidate_inode_page's remove_mapping
 * prevents drop_caches from setting mapping to NULL beneath us.
 * The case we do have to guard against is when memory pressure made
 * shmem_writepage move it from filecache to swapcache beneath us:
 * an unlikely race, but we do need to retry for folio->mapping.
if (unlikely(!mapping)) {
 * Folio lock is required to identify which special case above
 * applies. If this is really a shmem page then the folio lock
 * will prevent unexpected transitions.
shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
 * Private mappings are handled in a simple way.
 * If the futex key is stored in anonymous memory, then the associated
 * object is the mm which is implicitly pinned by the calling process.
 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
 * it's a read-only handle, it's expected that futexes attach to
 * the object not the particular process.
if (folio_test_anon(folio)) {
 * A RO anonymous page will never change and thus doesn't make
 * sense for futex operations.
if (unlikely(should_fail_futex(true)) || ro) {
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
 * The associated futex object in this case is the inode and
 * the folio->mapping must be traversed. Ordinarily this should
 * be stabilised under folio lock but it's not strictly
 * necessary in this case as we just want to pin the inode, not
 * update i_pages or anything like that.
 * The RCU read lock is taken as the inode is finally freed
 * under RCU. If the mapping still matches expectations then the
 * mapping->host can be safely accessed as being a valid inode.
if (READ_ONCE(folio->mapping) != mapping) {
inode = READ_ONCE(mapping->host);
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.i_seq = get_inode_sequence_number(inode);
key->shared.pgoff = folio->index + folio_page_idx(folio, page);
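/*
 * Userspace-side illustration (a sketch under stated assumptions, not
 * kernel code): the PRIVATE opcodes skip the mapping lookup above and key
 * on (current->mm, address), while plain FUTEX_WAIT on a word in a
 * MAP_SHARED mapping gets the inode-based key and therefore matches
 * waiters in other processes mapping the same file. The variable names
 * below are made up; the syscall form is the standard futex(2) one.
 *
 *	long r;
 *	// same-process waiters only: private (mm, address) key
 *	r = syscall(SYS_futex, &private_word, FUTEX_WAIT_PRIVATE, 0, NULL, NULL, 0);
 *	// cross-process waiters on a MAP_SHARED word: inode-based key
 *	r = syscall(SYS_futex, shared_word, FUTEX_WAIT, 0, NULL, NULL, 0);
 */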
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr: pointer to faulting user space address
 * Slow path to fixup the fault we just took in the atomic write
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
int fault_in_user_writeable(u32 __user *uaddr)
struct mm_struct *mm = current->mm;
ret = fixup_user_fault(mm, (unsigned long)uaddr,
FAULT_FLAG_WRITE, NULL);
mmap_read_unlock(mm);
return ret < 0 ? ret : 0;
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb: the hash bucket the futex_q's reside in
 * @key: the futex key (to distinguish it from other futex_q's)
 * Must be called with the hb lock held.
struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key)
struct futex_q *this;
plist_for_each_entry(this, &hb->chain, list) {
if (futex_match(&this->key, key))
int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval)
ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
int futex_get_value_locked(u32 *dest, u32 __user *from)
ret = __get_user(*dest, from);
return ret ? -EFAULT : 0;
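/*
 * Both *_locked() helpers above are called with the hash-bucket lock held,
 * so they must not sleep on a page fault; in the full source the user
 * access is wrapped in pagefault_disable()/pagefault_enable() (elided
 * here). A minimal sketch of that pattern, with a made-up function name:
 */
static int example_read_futex_word(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();		/* a fault now fails instead of sleeping */
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}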
 * wait_for_owner_exiting - Block until the owner has exited
 * @ret: owner's current futex lock status
 * @exiting: Pointer to the exiting task
 * Caller must hold a refcount on @exiting.
void wait_for_owner_exiting(int ret, struct task_struct *exiting)
WARN_ON_ONCE(exiting);
if (WARN_ON_ONCE(ret == -EBUSY && !exiting))
mutex_lock(&exiting->futex_exit_mutex);
 * No point in doing state checking here. If the waiter got here
 * while the task was in exec()->exec_futex_release() then it can
 * have any FUTEX_STATE_* value when the waiter has acquired the
 * mutex. OK, if running, EXITING or DEAD if it reached exit()
 * already. Highly unlikely and not a problem. Just one more round
 * through the futex maze.
mutex_unlock(&exiting->futex_exit_mutex);
put_task_struct(exiting);
 * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q: The futex_q to unqueue
 * The q->lock_ptr must not be NULL and must be held by the caller.
void __futex_unqueue(struct futex_q *q)
struct futex_hash_bucket *hb;
if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
lockdep_assert_held(q->lock_ptr);
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
futex_hb_waiters_dec(hb);
/* The key must be already stored in q->key. */
struct futex_hash_bucket *futex_q_lock(struct futex_q *q)
__acquires(&hb->lock)
struct futex_hash_bucket *hb;
hb = futex_hash(&q->key);
 * Increment the counter before taking the lock so that
 * a potential waker won't miss a to-be-slept task that is
 * waiting for the spinlock. This is safe as all futex_q_lock()
 * users end up calling futex_queue(). Similarly, for housekeeping,
 * decrement the counter at futex_q_unlock() when some error has
 * occurred and we don't end up adding the task to the list.
futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock);
void futex_q_unlock(struct futex_hash_bucket *hb)
__releases(&hb->lock)
spin_unlock(&hb->lock);
futex_hb_waiters_dec(hb);
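/*
 * For context, a sketch of the waker-side fast path that this counter
 * enables (loosely based on futex_wake(); not code from this file): a
 * waker can skip hb->lock entirely when nobody is queued or about to
 * queue, relying on the barrier implied by futex_hb_waiters_inc() above.
 */
static int example_wake_fast_path(union futex_key *key)
{
	struct futex_hash_bucket *hb = futex_hash(key);

	/* no waiters registered: no need to take the bucket lock at all */
	if (!futex_hb_waiters_pending(hb))
		return 0;

	spin_lock(&hb->lock);
	/* ... walk hb->chain and wake matching waiters ... */
	spin_unlock(&hb->lock);
	return 1;
}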
void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb)
 * The priority used to register this element is
 * - either the real thread-priority for the real-time threads
 * (i.e. threads with a priority lower than MAX_RT_PRIO)
 * - or MAX_RT_PRIO for non-RT threads.
 * Thus, all RT-threads are woken first in priority order, and
 * the others are woken last, in FIFO order.
prio = min(current->normal_prio, MAX_RT_PRIO);
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
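/*
 * Illustrative sketch of the plist ordering relied on above (not from this
 * file; names are made up): plists sort by ascending ->prio and lower
 * values mean higher priority, so RT waiters (prio < MAX_RT_PRIO) always
 * sort ahead of the non-RT waiters that were clamped to MAX_RT_PRIO.
 */
static void example_plist_order(void)
{
	struct plist_head head;
	struct plist_node rt_waiter, fair_waiter;

	plist_head_init(&head);
	plist_node_init(&rt_waiter, 10);		/* an RT waiter */
	plist_node_init(&fair_waiter, MAX_RT_PRIO);	/* a SCHED_OTHER waiter */

	plist_add(&fair_waiter, &head);
	plist_add(&rt_waiter, &head);

	/* iteration visits rt_waiter first; equal-prio nodes stay in FIFO order */
}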
 * futex_unqueue() - Remove the futex_q from its futex_hash_bucket
 * @q: The futex_q to unqueue
 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
 * be paired with exactly one earlier call to futex_queue().
 * - 1 - if the futex_q was still queued (and we unqueued it);
 * - 0 - if the futex_q was already removed by the waking thread
int futex_unqueue(struct futex_q *q)
spinlock_t *lock_ptr;
/* In the common case we don't take the spinlock, which is nice. */
 * q->lock_ptr can change between this read and the following spin_lock.
 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
 * optimizing lock_ptr out of the logic below.
lock_ptr = READ_ONCE(q->lock_ptr);
if (lock_ptr != NULL) {
 * q->lock_ptr can change between reading it and
 * spin_lock(), causing us to take the wrong lock. This
 * corrects the race condition.
 * Reasoning goes like this: if we have the wrong lock,
 * q->lock_ptr must have changed (maybe several times)
 * between reading it and the spin_lock(). It can
 * change again after the spin_lock() but only if it was
 * already changed before the spin_lock(). It cannot,
 * however, change back to the original value. Therefore
 * we can detect whether we acquired the correct lock.
if (unlikely(lock_ptr != q->lock_ptr)) {
spin_unlock(lock_ptr);
spin_unlock(lock_ptr);
 * PI futexes cannot be requeued and must remove themselves from the hash
 * bucket. The hash bucket lock (i.e. lock_ptr) is held.
void futex_unqueue_pi(struct futex_q *q)
 * If the lock was not acquired (due to timeout or signal) then the
 * rt_waiter is removed before futex_q is. If this is observed by
 * an unlocker after dropping the rtmutex wait lock and before
 * acquiring the hash bucket lock, then the unlocker dequeues the
 * futex_q from the hash bucket list to guarantee consistent state
 * vs. userspace. Therefore the dequeue here must be conditional.
if (!plist_node_empty(&q->list))
BUG_ON(!q->pi_state);
put_pi_state(q->pi_state);
/* Constants for the pending_op argument of handle_futex_death */
#define HANDLE_DEATH_PENDING true
#define HANDLE_DEATH_LIST false
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
bool pi, bool pending_op)
u32 uval, nval, mval;
/* Futex address must be 32bit aligned */
if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
if (get_user(uval, uaddr))
 * Special case for regular (non PI) futexes. The unlock path in
 * user space has two race scenarios:
 * 1. The unlock path releases the user space futex value and
 *    before it can execute the futex() syscall to wake up
 *    waiters it is killed.
 * 2. A woken up waiter is killed before it can acquire the
 *    futex in user space.
 * In the second case, the wake up notification could be generated
 * by the unlock path in user space after setting the futex value
 * to zero or by the kernel after setting the OWNER_DIED bit below.
 * In both cases the TID validation below prevents a wakeup of
 * potential waiters which can cause these waiters to block forever.
 * In both cases the following conditions are met:
 * 1) task->robust_list->list_op_pending != NULL
 *    @pending_op == true
 * 2) The owner part of user space futex value == 0
 * 3) Regular futex: @pi == false
 * If these conditions are met, it is safe to attempt waking up a
 * potential waiter without touching the user space futex value and
 * trying to set the OWNER_DIED bit. If the futex value is zero,
 * the rest of the user space mutex state is consistent, so a woken
 * waiter will just take over the uncontended futex. Setting the
 * OWNER_DIED bit would create inconsistent state and malfunction
 * of the user space owner died handling. Otherwise, the OWNER_DIED
 * bit is already set, and the woken waiter is expected to deal with
 * this.
owner = uval & FUTEX_TID_MASK;
if (pending_op && !pi && !owner) {
futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
FUTEX_BITSET_MATCH_ANY);
if (owner != task_pid_vnr(curr))
 * Ok, this dying thread is truly holding a futex
 * of interest. Set the OWNER_DIED bit atomically
 * via cmpxchg, and if the value had FUTEX_WAITERS
 * set, wake up a waiter (if any). (We have to do a
 * futex_wake() even if OWNER_DIED is already set -
 * to handle the rare but possible case of recursive
 * thread-death.) The rest of the cleanup is done in
 * do_exit().
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
 * We are not holding a lock here, but we want to have
 * the pagefault_disable/enable() protection because
 * we want to handle the fault gracefully. If the
 * access fails we try to fault in the futex with R/W
 * verification via get_user_pages. get_user() above
 * does not guarantee R/W access. If that fails we
 * give up and leave the futex locked.
if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) {
if (fault_in_user_writeable(uaddr))
 * Wake robust non-PI futexes here. The wakeup of
 * PI futexes happens in exit_pi_state():
if (!pi && (uval & FUTEX_WAITERS)) {
futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1,
FUTEX_BITSET_MATCH_ANY);
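/*
 * Worked example of the value manipulation above, using the UAPI bit
 * layout (FUTEX_WAITERS = 0x80000000, FUTEX_OWNER_DIED = 0x40000000,
 * FUTEX_TID_MASK = 0x3fffffff): if the dying owner's TID is 1111 (0x457)
 * and a waiter is queued, then
 *
 *	uval = FUTEX_WAITERS | 1111               = 0x80000457
 *	mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED = 0xc0000000
 *
 * i.e. the owner TID is cleared, OWNER_DIED is set, and the preserved
 * FUTEX_WAITERS bit triggers the futex_wake() above.
 */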
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
static inline int fetch_robust_entry(struct robust_list __user **entry,
struct robust_list __user * __user *head,
unsigned long uentry;
if (get_user(uentry, (unsigned long __user *)head))
*entry = (void __user *)(uentry & ~1UL);
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 * We silently return on any sign of a list-walking problem.
static void exit_robust_list(struct task_struct *curr)
struct robust_list_head __user *head = curr->robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int next_pi;
unsigned long futex_offset;
 * Fetch the list head (which was registered earlier, via
 * sys_set_robust_list()):
if (fetch_robust_entry(&entry, &head->list.next, &pi))
 * Fetch the relative futex offset:
if (get_user(futex_offset, &head->futex_offset))
 * Fetch any possibly pending lock-add first, and handle it
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
next_entry = NULL; /* avoid warning with gcc */
while (entry != &head->list) {
 * Fetch the next entry in the list before calling
 * handle_futex_death:
rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
 * A pending lock might already be on the list, so
 * don't process it twice:
if (entry != pending) {
if (handle_futex_death((void __user *)entry + futex_offset,
curr, pi, HANDLE_DEATH_LIST))
 * Avoid excessively long or circular lists:
handle_futex_death((void __user *)pending + futex_offset,
curr, pip, HANDLE_DEATH_PENDING);
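/*
 * Userspace-side illustration of the ABI walked above (a sketch, not
 * kernel code): a thread registers one robust_list_head; each robust
 * mutex embeds a struct robust_list that is linked into that list while
 * held, and futex_offset tells the kernel where the futex word lives
 * relative to each list entry. The struct layout below is an example
 * choice; the head fields are the UAPI ones from <linux/futex.h>.
 *
 *	struct example_robust_mutex {
 *		struct robust_list list;	// linked while the mutex is held
 *		uint32_t futex_word;		// owner TID plus WAITERS/OWNER_DIED bits
 *	};
 *
 *	static struct robust_list_head rlh = {
 *		.list		 = { .next = &rlh.list },	// empty circular list
 *		.futex_offset	 = offsetof(struct example_robust_mutex, futex_word),
 *		.list_op_pending = NULL,
 *	};
 *
 *	// once per thread:
 *	syscall(SYS_set_robust_list, &rlh, sizeof(rlh));
 */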
static void __user *futex_uaddr(struct robust_list __user *entry,
compat_long_t futex_offset)
compat_uptr_t base = ptr_to_compat(entry);
void __user *uaddr = compat_ptr(base + futex_offset);
 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
compat_uptr_t __user *head, unsigned int *pi)
if (get_user(*uentry, head))
*entry = compat_ptr((*uentry) & ~1);
*pi = (unsigned int)(*uentry) & 1;
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 * We silently return on any sign of a list-walking problem.
static void compat_exit_robust_list(struct task_struct *curr)
struct compat_robust_list_head __user *head = curr->compat_robust_list;
struct robust_list __user *entry, *next_entry, *pending;
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
unsigned int next_pi;
compat_uptr_t uentry, next_uentry, upending;
compat_long_t futex_offset;
 * Fetch the list head (which was registered earlier, via
 * sys_set_robust_list()):
if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
 * Fetch the relative futex offset:
if (get_user(futex_offset, &head->futex_offset))
 * Fetch any possibly pending lock-add first, and handle it
if (compat_fetch_robust_entry(&upending, &pending,
&head->list_op_pending, &pip))
next_entry = NULL; /* avoid warning with gcc */
while (entry != (struct robust_list __user *) &head->list) {
 * Fetch the next entry in the list before calling
 * handle_futex_death:
rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
(compat_uptr_t __user *)&entry->next, &next_pi);
 * A pending lock might already be on the list, so
 * don't process it twice:
if (entry != pending) {
void __user *uaddr = futex_uaddr(entry, futex_offset);
if (handle_futex_death(uaddr, curr, pi,
HANDLE_DEATH_LIST))
uentry = next_uentry;
 * Avoid excessively long or circular lists:
void __user *uaddr = futex_uaddr(pending, futex_offset);
handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);
#ifdef CONFIG_FUTEX_PI
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
static void exit_pi_state_list(struct task_struct *curr)
struct list_head *next, *head = &curr->pi_state_list;
struct futex_pi_state *pi_state;
struct futex_hash_bucket *hb;
union futex_key key = FUTEX_KEY_INIT;
 * We are a ZOMBIE and nobody can enqueue itself on
 * pi_state_list anymore, but we have to be careful
 * versus waiters unqueueing themselves:
raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
pi_state = list_entry(next, struct futex_pi_state, list);
hb = futex_hash(&key);
 * We can race against put_pi_state() removing itself from the
 * list (a waiter going away). put_pi_state() will first
 * decrement the reference count and then modify the list, so
 * it's possible to see the list entry but fail this reference
 * acquire.
 * In that case, drop the locks to let put_pi_state() make
 * progress and retry the loop.
if (!refcount_inc_not_zero(&pi_state->refcount)) {
raw_spin_unlock_irq(&curr->pi_lock);
raw_spin_lock_irq(&curr->pi_lock);
raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
raw_spin_lock(&curr->pi_lock);
 * We dropped the pi-lock, so re-check whether this
 * task still owns the PI-state:
if (head->next != next) {
/* retain curr->pi_lock for the loop invariant */
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
put_pi_state(pi_state);
WARN_ON(pi_state->owner != curr);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
raw_spin_unlock(&curr->pi_lock);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
rt_mutex_futex_unlock(&pi_state->pi_mutex);
put_pi_state(pi_state);
raw_spin_lock_irq(&curr->pi_lock);
raw_spin_unlock_irq(&curr->pi_lock);
static inline void exit_pi_state_list(struct task_struct *curr) { }
static void futex_cleanup(struct task_struct *tsk)
if (unlikely(tsk->robust_list)) {
exit_robust_list(tsk);
tsk->robust_list = NULL;
#ifdef CONFIG_COMPAT
if (unlikely(tsk->compat_robust_list)) {
compat_exit_robust_list(tsk);
tsk->compat_robust_list = NULL;
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
 * futex_exit_recursive - Set the task's futex state to FUTEX_STATE_DEAD
 * @tsk: task to set the state on
 * Set the futex exit state of the task lockless. The futex waiter code
 * observes that state when a task is exiting and loops until the task has
 * actually finished the futex cleanup. The worst case for this is that the
 * waiter runs through the wait loop until the state becomes visible.
 * This is called from the recursive fault handling path in make_task_dead().
 * This is best effort. Either the futex exit code has run already or
 * not. If the OWNER_DIED bit has been set on the futex then the waiter can
 * take it over. If not, the problem is pushed back to user space. If the
 * futex exit code did not run yet, then an already queued waiter might
 * block forever, but there is nothing which can be done about that.
void futex_exit_recursive(struct task_struct *tsk)
/* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */
if (tsk->futex_state == FUTEX_STATE_EXITING)
mutex_unlock(&tsk->futex_exit_mutex);
tsk->futex_state = FUTEX_STATE_DEAD;
static void futex_cleanup_begin(struct task_struct *tsk)
 * Prevent various race issues against a concurrent incoming waiter
 * including live locks by forcing the waiter to block on
 * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in
 * attach_to_pi_owner().
mutex_lock(&tsk->futex_exit_mutex);
 * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock.
 * This ensures that all subsequent checks of tsk->futex_state in
 * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with
 * tsk->pi_lock held.
 * It also guarantees that a pi_state which was queued right before
 * the state change under tsk->pi_lock by a concurrent waiter must
 * be observed in exit_pi_state_list().
raw_spin_lock_irq(&tsk->pi_lock);
tsk->futex_state = FUTEX_STATE_EXITING;
raw_spin_unlock_irq(&tsk->pi_lock);
static void futex_cleanup_end(struct task_struct *tsk, int state)
 * Lockless store. The only side effect is that an observer might
 * take another loop until it becomes visible.
tsk->futex_state = state;
 * Drop the exit protection. This unblocks waiters which observed
 * FUTEX_STATE_EXITING to reevaluate the state.
mutex_unlock(&tsk->futex_exit_mutex);
void futex_exec_release(struct task_struct *tsk)
 * The state handling is done for consistency, but in the case of
 * exec() there is no way to prevent further damage as the PID stays
 * the same. But for the unlikely and arguably buggy case that a
 * futex is held on exec(), this provides at least as much state
 * consistency protection as is possible.
futex_cleanup_begin(tsk);
 * Reset the state to FUTEX_STATE_OK. The task is alive and about to
 * exec a new binary.
futex_cleanup_end(tsk, FUTEX_STATE_OK);
void futex_exit_release(struct task_struct *tsk)
futex_cleanup_begin(tsk);
futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
static int __init futex_init(void)
unsigned int futex_shift;
#ifdef CONFIG_BASE_SMALL
futex_hashsize = 16;
#else
futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
#endif
futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
futex_hashsize, 0, 0,
futex_hashsize, futex_hashsize);
futex_hashsize = 1UL << futex_shift;
for (i = 0; i < futex_hashsize; i++) {
atomic_set(&futex_queues[i].waiters, 0);
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
core_initcall(futex_init);
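/*
 * Worked sizing example for the table set up above (illustrative
 * arithmetic only): on a machine with 6 possible CPUs,
 * 256 * num_possible_cpus() = 1536 and roundup_pow_of_two(1536) = 2048,
 * so futex_hashsize ends up as 2048 buckets and the mask used by
 * futex_hash() is 0x7ff. With CONFIG_BASE_SMALL the table is fixed at
 * 16 buckets.
 */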