diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c
index 7e79258feb279d44fdde7f2ba8278babc8384120..191e4720e546627aed0d7ec715673b1b8753b130 100644
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -275,6 +275,7 @@ void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
  * @lock:              the rt_mutex to take
  * @waiter:            the pre-initialized rt_mutex_waiter
  * @task:              the task to prepare
+ * @wake_q:            the wake_q to wake tasks after we release the wait_lock
  *
  * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
  * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
@@ -291,7 +292,8 @@ void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
  */
 int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
                                        struct rt_mutex_waiter *waiter,
-                                       struct task_struct *task)
+                                       struct task_struct *task,
+                                       struct wake_q_head *wake_q)
 {
        int ret;
 
@@ -302,7 +304,7 @@ int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
 
        /* We enforce deadlock detection for futexes */
        ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
-                                     RT_MUTEX_FULL_CHAINWALK);
+                                     RT_MUTEX_FULL_CHAINWALK, wake_q);
 
        if (ret && !rt_mutex_owner(lock)) {
                /*
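Threading a wake_q_head down into task_blocks_on_rt_mutex() follows the kernel's standard deferred-wakeup idiom, which matches the new @wake_q doc line above: wakeups generated while wait_lock is held are only queued, and the caller issues them after the lock is released. A minimal sketch of that idiom, where demo_lock and demo_wake_after_unlock() are hypothetical names standing in for lock->wait_lock and its user:

        #include <linux/sched.h>
        #include <linux/sched/wake_q.h>
        #include <linux/spinlock.h>

        static DEFINE_RAW_SPINLOCK(demo_lock);  /* stand-in for lock->wait_lock */

        static void demo_wake_after_unlock(struct task_struct *sleeper)
        {
                DEFINE_WAKE_Q(wake_q);

                raw_spin_lock_irq(&demo_lock);
                /*
                 * Do not wake 'sleeper' while demo_lock is held; queue the
                 * wakeup instead.  wake_q_add() takes a reference on the
                 * task, so it stays valid until wake_up_q() runs.
                 */
                wake_q_add(&wake_q, sleeper);
                raw_spin_unlock_irq(&demo_lock);

                /* Lock released; now it is safe to issue the wakeup. */
                wake_up_q(&wake_q);
        }
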
@@ -341,12 +343,16 @@ int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
                                      struct task_struct *task)
 {
        int ret;
+       DEFINE_WAKE_Q(wake_q);
 
        raw_spin_lock_irq(&lock->wait_lock);
-       ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+       ret = __rt_mutex_start_proxy_lock(lock, waiter, task, &wake_q);
        if (unlikely(ret))
                remove_waiter(lock, waiter);
+       preempt_disable();
        raw_spin_unlock_irq(&lock->wait_lock);
+       wake_up_q(&wake_q);
+       preempt_enable();
 
        return ret;
 }
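Note the ordering the hunk above introduces: preemption is disabled before wait_lock is dropped and re-enabled only after wake_up_q(), so a task made runnable by the deferred wakeups cannot preempt this CPU in the window between the unlock and the wakeups themselves. An illustrative condensation of that bracket (unlock_and_wake() is not a real function in this file):

        /* Condensed shape of rt_mutex_start_proxy_lock() above. */
        static void unlock_and_wake(struct rt_mutex_base *lock)
        {
                DEFINE_WAKE_Q(wake_q);

                raw_spin_lock_irq(&lock->wait_lock);
                /* ... work under wait_lock may wake_q_add() tasks here ... */
                preempt_disable();
                raw_spin_unlock_irq(&lock->wait_lock);
                /*
                 * Preemption is still off: the unlock re-enabled interrupts,
                 * but a newly runnable task cannot preempt this CPU until
                 * the deferred wakeups below have actually been issued.
                 */
                wake_up_q(&wake_q);
                preempt_enable();       /* reschedule now if a wakeup demands it */
        }
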
@@ -377,7 +383,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
        raw_spin_lock_irq(&lock->wait_lock);
        /* sleep on the mutex */
        set_current_state(TASK_INTERRUPTIBLE);
-       ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
+       ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter, NULL);
        /*
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
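The "sleep on the mutex" step above relies on the usual prepare-to-sleep protocol: the task marks itself TASK_INTERRUPTIBLE before the final condition check, so a wakeup racing with that check is not lost. rt_mutex_slowlock_block()'s body is not part of this diff; the sketch below only illustrates the generic pattern such a wait loop follows, and lock_acquired() and wait_for_lock_interruptible() are hypothetical:

        #include <linux/sched.h>
        #include <linux/sched/signal.h>

        static int wait_for_lock_interruptible(void)
        {
                int ret = 0;

                set_current_state(TASK_INTERRUPTIBLE);
                while (!lock_acquired()) {      /* lock_acquired() is hypothetical */
                        if (signal_pending(current)) {
                                ret = -EINTR;   /* interruptible sleep: honor signals */
                                break;
                        }
                        /*
                         * Sleep.  A wakeup that raced with the condition
                         * check is not lost: it sets the task back to
                         * TASK_RUNNING, so schedule() returns promptly.
                         */
                        schedule();
                        set_current_state(TASK_INTERRUPTIBLE);
                }
                __set_current_state(TASK_RUNNING);
                return ret;
        }
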