// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/slab.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>

#include "futex.h"
#include "../locking/rtmutex_common.h"
int refill_pi_state_cache(void)
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	refcount_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;
static struct futex_pi_state *alloc_pi_state(void)
	struct futex_pi_state *pi_state = current->pi_state_cache;

	current->pi_state_cache = NULL;
static void pi_state_update_owner(struct futex_pi_state *pi_state,
				  struct task_struct *new_owner)
	struct task_struct *old_owner = pi_state->owner;

	lockdep_assert_held(&pi_state->pi_mutex.wait_lock);

	if (old_owner) {
		raw_spin_lock(&old_owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		raw_spin_unlock(&old_owner->pi_lock);
	}

	if (new_owner) {
		raw_spin_lock(&new_owner->pi_lock);
		WARN_ON(!list_empty(&pi_state->list));
		list_add(&pi_state->list, &new_owner->pi_state_list);
		pi_state->owner = new_owner;
		raw_spin_unlock(&new_owner->pi_lock);
	}
void get_pi_state(struct futex_pi_state *pi_state)
	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
void put_pi_state(struct futex_pi_state *pi_state)
	if (!refcount_dec_and_test(&pi_state->refcount))

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {

		raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
		pi_state_update_owner(pi_state, NULL);
		rt_mutex_proxy_unlock(&pi_state->pi_mutex);
		raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		refcount_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *	thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10]	There is no transient state which leaves owner and user space
 *	TID out of sync. Except one error case where the kernel is denied
 *	write access to the user address, see fixup_pi_state_owner().
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *	pi_mutex->owner -> pi_state->owner, relation
 *
 * pi_state->refcount:
 *
 * Lock order:
 *
 *	pi_mutex->wait_lock
 */
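
/*
 * For reference when reading the table above: the user space futex word
 * packs the owner TID and two state bits. The values below come from the
 * futex UAPI (include/uapi/linux/futex.h); the snippet is an illustrative
 * sketch only, not code used here:
 *
 *	FUTEX_WAITERS	 = 0x80000000	(waiters are blocked in the kernel)
 *	FUTEX_OWNER_DIED = 0x40000000	(owner died, robust/PI cleanup ran)
 *	FUTEX_TID_MASK	 = 0x3fffffff	(TID of the current owner)
 *
 *	uTID   = uval & FUTEX_TID_MASK;
 *	uODIED = !!(uval & FUTEX_OWNER_DIED);
 */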
/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it:
 */
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
	pid_t pid = uval & FUTEX_TID_MASK;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between futex_queue() and futex_unqueue_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!refcount_read(&pi_state->refcount));

	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (futex_get_value_locked(&uval2, uaddr))

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fix up the owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */

			/*
			 * Take a ref on the state and return success. [4]
			 */

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */

		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))

	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
	/*
	 * If the futex exit state is not yet FUTEX_STATE_DEAD, tell the
	 * caller that the alleged owner is busy.
	 */
	if (tsk && tsk->futex_state != FUTEX_STATE_DEAD)

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *				  futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *   tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!tsk->flags & PF_EXITING) {
	 *   tsk->futex_state =		     } else {
	 *	FUTEX_STATE_DEAD;	       if (tsk->futex_state !=
	 *					return -ESRCH; <--- FAIL
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (futex_get_value_locked(&uval2, uaddr))

	/* If the user space value has changed, try again. */

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
static void __attach_to_pi_owner(struct task_struct *p, union futex_key *key,
				 struct futex_pi_state **ps)
	/*
	 * No existing pi state. First waiter. [2]
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
	 */
	struct futex_pi_state *pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);

	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
/*
 * Look up the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
			      struct futex_pi_state **ps,
			      struct task_struct **exiting)
	pid_t pid = uval & FUTEX_TID_MASK;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 *
	 * The !pid check is paranoid. None of the call sites should end up
	 * with pid == 0, but better safe than sorry. Let the caller retry
	 */
	p = find_get_task_by_vpid(pid);
	if (!p)
		return handle_exit_race(uaddr, uval, NULL);

	if (unlikely(p->flags & PF_KTHREAD)) {

	/*
	 * We need to look at the task state to figure out whether the
	 * task is exiting. To protect against the change of the task state
	 * in futex_exit_release(), we do this protected by p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->futex_state != FUTEX_STATE_OK)) {
		/*
		 * The task is on the way out. When the futex state is
		 * FUTEX_STATE_DEAD, we know that the task has finished
		 * the cleanup:
		 */
		int ret = handle_exit_race(uaddr, uval, p);

		raw_spin_unlock_irq(&p->pi_lock);
		/*
		 * If the owner task is between FUTEX_STATE_EXITING and
		 * FUTEX_STATE_DEAD then store the task pointer and keep
		 * the reference on the task struct. The calling code will
		 * drop all locks, wait for the task to reach
		 * FUTEX_STATE_DEAD and then drop the refcount. This is
		 * required to prevent a live lock when the current task
		 * preempted the exiting task between the two states.
		 */

	__attach_to_pi_owner(p, key, ps);
	raw_spin_unlock_irq(&p->pi_lock);
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
	if (unlikely(should_fail_futex(true)))

	err = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval);

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for. This will
 *			be "current" except in the case of requeue pi.
 * @exiting:		Pointer to store the task pointer of the owner task
 *			which is in the middle of exiting
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *
 * The hb->lock must be held by the caller.
 *
 * @exiting is only set when the return value is -EBUSY. If so, this holds
 * a refcount on the exiting task on return and the caller needs to drop it
 * after waiting for the exit to complete.
 */
int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
			 union futex_key *key,
			 struct futex_pi_state **ps,
			 struct task_struct *task,
			 struct task_struct **exiting,
			 int set_waiters)
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *top_waiter;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (futex_get_value_locked(&uval, uaddr))

	if (unlikely(should_fail_futex(true)))

	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))

	if ((unlikely(should_fail_futex(true))))

	/*
	 * Look up existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);

		/*
		 * If the waiter bit was requested the caller also needs PI
		 * state attached to the new owner of the user space futex.
		 *
		 * @task is guaranteed to be alive and it cannot be exiting
		 * because it is either sleeping or waiting in
		 * futex_requeue_pi_wakeup_sync().
		 *
		 * No need to do the full attach_to_pi_owner() exercise
		 * because @task is known and valid.
		 */
			raw_spin_lock_irq(&task->pi_lock);
			__attach_to_pi_owner(task, key, ps);
			raw_spin_unlock_irq(&task->pi_lock);

	/*
	 * First waiter. Set the waiters bit before attaching ourselves to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);

	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uaddr, newval, key, ps, exiting);
/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval,
			 struct futex_pi_state *pi_state,
			 struct rt_mutex_waiter *top_waiter)
	struct task_struct *new_owner;
	bool postunlock = false;
	DEFINE_RT_WAKE_Q(wqh);

	new_owner = top_waiter->task;

	/*
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We clean up the owner
	 * died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true))) {

	ret = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval);
	if (!ret && (curval != uval)) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)

		/*
		 * This is a point of no return; once we modified the uval
		 * there is no going back and subsequent operations must
		 * not fail.
		 */
		pi_state_update_owner(pi_state, new_owner);
		postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wqh);

	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

		rt_mutex_postunlock(&wqh);
static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				  struct task_struct *argowner)
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner, *newowner;
	u32 uval, curval, newval, newtid;

	oldowner = pi_state->owner;

	/*
	 * We are here because either:
	 *
	 *  - we stole the lock and pi_state->owner needs updating to reflect
	 *    that (@argowner == current),
	 *
	 *  - someone stole our lock and we need to fix things to point to the
	 *    new owner (@argowner == NULL).
	 *
	 * Either way, we have to replace the TID in the user space variable.
	 * This must be atomic as we have to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory read-only for COW.
	 *
	 * Modifying pi_state _before_ the user space value would leave the
	 * pi_state in an inconsistent state when we fault here, because we
	 * need to drop the locks to handle the fault. This might be observed
	 * in the PID checks when attaching to PI state.
	 */
		if (oldowner != current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */

		if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
			/* We got the lock. pi_state is correct. Tell caller. */

		/*
		 * The trylock just failed, so either there is an owner or
		 * there is a higher priority waiter than this one.
		 */
		newowner = rt_mutex_owner(&pi_state->pi_mutex);
		/*
		 * If the higher priority waiter has not yet taken over the
		 * rtmutex then newowner is NULL. We can't return here with
		 * that state because it's inconsistent vs. the user space
		 * state. So drop the locks and try again. It's a valid
		 * situation and not any different from the other retry
		 * conditions.
		 */
		if (unlikely(!newowner)) {

		WARN_ON_ONCE(argowner != current);
		if (oldowner == current) {
			/*
			 * We raced against a concurrent self; things are
			 * already fixed up. Nothing to do.
			 */

	newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;

	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	err = futex_get_value_locked(&uval, uaddr);

		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		err = futex_cmpxchg_value_locked(&curval, uaddr, uval, newval);

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	pi_state_update_owner(pi_state, newowner);

	return argowner == current;
	/*
	 * In order to reschedule or handle a page fault, we need to drop the
	 * locks here. In the case of a fault, this gives the other task
	 * (either the highest priority waiter itself or the task which stole
	 * the rtmutex) the chance to try the fixup of the pi_state. So once we
	 * are back from handling the fault we need to check the pi_state after
	 * reacquiring the locks and before trying to do another fixup. When
	 * the fixup has been done already we simply return.
	 *
	 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
	 * drop hb->lock since the caller owns the hb -> futex_q relation.
	 * Dropping the pi_mutex->wait_lock requires the state to be revalidated.
	 */
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	spin_unlock(q->lock_ptr);

		err = fault_in_user_writeable(uaddr);

	spin_lock(q->lock_ptr);
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return argowner == current;

	/* Retry if err was -EAGAIN or the fault-in succeeded */

	/*
	 * fault_in_user_writeable() failed so user state is immutable. At
	 * best we can make the kernel state consistent but user state will
	 * be most likely hosed and any subsequent unlock operation will be
	 * rejected due to PI futex rule [10].
	 *
	 * Ensure that the rtmutex owner is also the pi_state owner despite
	 * the user space value claiming something different. There is no
	 * point in unlocking the rtmutex if current is the owner as it
	 * would need to wait until the next waiter has taken the rtmutex
	 * to guarantee consistent state. Keep it simple. Userspace asked
	 * for this wrecked state.
	 *
	 * The rtmutex has an owner - either current or some other
	 * task. See the EAGAIN loop above.
	 */
	pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *argowner)
	struct futex_pi_state *pi_state = q->pi_state;

	lockdep_assert_held(q->lock_ptr);

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
	ret = __fixup_pi_state_owner(uaddr, q, argowner);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
/**
 * fixup_pi_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to clean up
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Return:
 *  - 1 - success, lock taken;
 *  - 0 - success, lock not taken;
 *  - <0 - on error (-EFAULT)
 */
int fixup_pi_owner(u32 __user *uaddr, struct futex_q *q, int locked)
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 *
		 * Speculative pi_state->owner read (we don't hold wait_lock);
		 * since we own the lock pi_state->owner == current is the
		 * stable state, anything else needs more attention.
		 */
		if (q->pi_state->owner != current)
			return fixup_pi_state_owner(uaddr, q, current);

	/*
	 * If we didn't get the lock, check if anybody stole it from us. In
	 * that case, we need to fix up the uval to point to them instead of
	 * us, otherwise bad things happen. [10]
	 *
	 * Another speculative read; pi_state->owner == current is unstable
	 * but needs our attention.
	 */
	if (q->pi_state->owner == current)
		return fixup_pi_state_owner(uaddr, q, NULL);

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner of the rt_mutex. Warn and establish consistent state.
	 */
	if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
		return fixup_pi_state_owner(uaddr, q, current);
/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block as a consequence of relying
 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
 * a 0 value of the futex too.)
 *
 * Also serves as futex trylock_pi()'ing, with the corresponding semantics.
 */
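
/*
 * For illustration, the user space fast path that this slow path backs up
 * looks roughly like the sketch below (a minimal example of the PI locking
 * protocol, not the glibc implementation):
 *
 *	u32 tid = gettid();
 *	u32 expected = 0;
 *
 *	if (!atomic_compare_exchange_strong(uaddr, &expected, tid))
 *		syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *
 * Only when the 0 -> TID cmpxchg fails does the kernel get involved: it
 * sets FUTEX_WAITERS, attaches or creates the pi_state and blocks on the
 * rt_mutex.
 */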
int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock)
	struct hrtimer_sleeper timeout, *to;
	struct task_struct *exiting = NULL;
	struct rt_mutex_waiter rt_waiter;
	struct futex_hash_bucket *hb;
	struct futex_q q = futex_q_init;
	DEFINE_WAKE_Q(wake_q);

	if (!IS_ENABLED(CONFIG_FUTEX_PI))

	if (refill_pi_state_cache())

	to = futex_setup_timer(time, &timeout, flags, 0);

	ret = get_futex_key(uaddr, flags, &q.key, FUTEX_WRITE);
	if (unlikely(ret != 0))

	hb = futex_q_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
				   &exiting, 0);
		/*
		 * Atomic work succeeded and we got the lock,
		 * or failed. Either way, we do _not_ block.
		 */
			/* We got the lock. */
			goto out_unlock_put_key;

			/*
			 * Two reasons for this:
			 * - EBUSY: Task is exiting and we just wait for the
			 *   exit to complete.
			 * - EAGAIN: The user space value changed.
			 */

			/*
			 * Handle the case where the owner is in the middle of
			 * exiting. Wait for the exit to complete otherwise
			 * this task might loop forever, aka. live lock.
			 */
			wait_for_owner_exiting(ret, exiting);

			goto out_unlock_put_key;

	WARN_ON(!q.pi_state);

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	__futex_queue(&q, hb, current);

		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;

	/*
	 * Must be done before we enqueue the waiter. Here it is unfortunately
	 * under the hb lock, but that *should* work because it does nothing.
	 */
	rt_mutex_pre_schedule();
	rt_mutex_init_waiter(&rt_waiter);

	/*
	 * On PREEMPT_RT, when hb->lock becomes an rt_mutex, we must not
	 * hold it while doing rt_mutex_start_proxy(), because then it will
	 * include hb->lock in the blocking chain, even though we'll not in
	 * fact hold it while blocking. This will lead it to report -EDEADLK
	 * and BUG when futex_unlock_pi() interleaves with this.
	 *
	 * Therefore acquire wait_lock while holding hb->lock, but drop the
	 * latter before calling __rt_mutex_start_proxy_lock(). This
	 * interleaves with futex_unlock_pi() -- which does a similar lock
	 * handoff -- such that the latter can observe the futex_q::pi_state
	 * before __rt_mutex_start_proxy_lock() is done.
	 */
	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
	spin_unlock(q.lock_ptr);
	/*
	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
	 * it sees the futex_q::pi_state.
	 */
	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q);
	raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);

		hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);

	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

	/*
	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
	 * unwind the above, however we cannot lock hb->lock because
	 * rt_mutex already has a waiter enqueued and hb->lock can itself try
	 * and enqueue an rt_waiter through rtlock.
	 *
	 * Doing the cleanup without holding hb->lock can cause inconsistent
	 * state between hb and pi_state, but only in the direction of not
	 * seeing a waiter that is leaving.
	 *
	 * See futex_unlock_pi(), it deals with this inconsistency.
	 *
	 * There be dragons here, since we must deal with the inconsistency on
	 * the way out (here), it is impossible to detect/warn about the race
	 * the other way around (missing an incoming waiter).
	 *
	 * What could possibly go wrong...
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;

	/*
	 * Now that the rt_waiter has been dequeued, it is safe to use
	 * spinlock/rtlock (which might enqueue its own rt_waiter) and fix up
	 * the state.
	 */
	spin_lock(q.lock_ptr);
	/*
	 * Waiter is unqueued.
	 */
	rt_mutex_post_schedule();
	/*
	 * Fixup the pi_state owner and possibly acquire the lock if we
	 * haven't already.
	 */
	res = fixup_pi_owner(uaddr, &q, !ret);
	/*
	 * If fixup_pi_owner() returned an error, propagate that. If it acquired
	 * the lock, clear our -ETIMEDOUT or -EINTR.
	 */
	if (res)
		ret = (res < 0) ? res : 0;

	futex_unqueue_pi(&q);
	spin_unlock(q.lock_ptr);

		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);

	return ret != -EINTR ? ret : -ERESTARTNOINTR;

	ret = fault_in_user_writeable(uaddr);

	if (!(flags & FLAGS_SHARED))
/*
 * Userspace attempted a TID -> 0 atomic transition, and failed.
 * This is the in-kernel slowpath: we look up the PI state (if any),
 * and do the rt-mutex unlock.
 */
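
/*
 * The matching user space fast path is, roughly (again an illustrative
 * sketch only): reset the word from our bare TID back to 0; if that fails
 * because FUTEX_WAITERS or FUTEX_OWNER_DIED is set, let the kernel do it:
 *
 *	u32 expected = gettid();
 *
 *	if (!atomic_compare_exchange_strong(uaddr, &expected, 0))
 *		syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 */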
int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
	u32 curval, uval, vpid = task_pid_vnr(current);
	union futex_key key = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb;
	struct futex_q *top_waiter;

	if (!IS_ENABLED(CONFIG_FUTEX_PI))

	if (get_user(uval, uaddr))

	/*
	 * We release only a lock we actually own:
	 */
	if ((uval & FUTEX_TID_MASK) != vpid)

	ret = get_futex_key(uaddr, flags, &key, FUTEX_WRITE);

	hb = futex_hash(&key);
	spin_lock(&hb->lock);

	/*
	 * Check waiters first. We do not trust user space values at
	 * all and we at least want to know if user space fiddled
	 * with the futex value instead of blindly unlocking.
	 */
	top_waiter = futex_top_waiter(hb, &key);
		struct futex_pi_state *pi_state = top_waiter->pi_state;
		struct rt_mutex_waiter *rt_waiter;

		/*
		 * If current does not own the pi_state then the futex is
		 * inconsistent and user space fiddled with the futex value.
		 */
		if (pi_state->owner != current)

		/*
		 * By taking wait_lock while still holding hb->lock, we ensure
		 * there is no point where we hold neither; and thereby
		 * wake_futex_pi() must observe any new waiters.
		 *
		 * Since the cleanup: case in futex_lock_pi() removes the
		 * rt_waiter without holding hb->lock, it is possible for
		 * wake_futex_pi() to not find a waiter while the above does,
		 * in this case the waiter is on the way out and it can be
		 * ignored.
		 *
		 * In particular, this forces __rt_mutex_start_proxy() to
		 * complete such that we're guaranteed to observe the
		 * rt_waiter.
		 */
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

		/*
		 * Futex vs rt_mutex waiter state -- if there are no rt_mutex
		 * waiters even though futex thinks there are, then the waiter
		 * is leaving. The entry needs to be removed from the list so a
		 * new futex_lock_pi() is not using this stale PI-state while
		 * the futex is available in user space again.
		 * There can be more than one task on its way out so it needs
		 * to retry.
		 */
		rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
			__futex_unqueue(top_waiter);
			raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

		get_pi_state(pi_state);
		spin_unlock(&hb->lock);

		/* drops pi_state->pi_mutex.wait_lock */
		ret = wake_futex_pi(uaddr, uval, pi_state, rt_waiter);

		put_pi_state(pi_state);

		/*
		 * Success, we're done! No tricky corner cases.
		 */

		/*
		 * The atomic access to the futex value generated a
		 * pagefault, so retry the user-access and the wakeup:
		 */

		/*
		 * An unconditional UNLOCK_PI op raced against a waiter
		 * setting the FUTEX_WAITERS bit. Try again.
		 */

		/*
		 * wake_futex_pi has detected invalid state. Tell user
		 * space.
		 */

	/*
	 * We have no kernel internal state, i.e. no waiters in the
	 * kernel. Waiters which are about to queue themselves are stuck
	 * on hb->lock. So we can safely ignore them. We do neither
	 * preserve the WAITERS bit nor the OWNER_DIED one, as we are the
	 * only user of the futex word at this point.
	 */
	if ((ret = futex_cmpxchg_value_locked(&curval, uaddr, uval, 0))) {
		spin_unlock(&hb->lock);

	/*
	 * If uval has changed, let user space handle it.
	 */
	ret = (curval == uval) ? 0 : -EAGAIN;

	spin_unlock(&hb->lock);

	ret = fault_in_user_writeable(uaddr);