lockdep: fix deadlock issue between lockdep and rcu
author    Zhiguo Niu <[email protected]>
          Thu, 20 Jun 2024 22:54:34 +0000 (22:54 +0000)
committer Boqun Feng <[email protected]>
          Tue, 6 Aug 2024 17:46:42 +0000 (10:46 -0700)
There is a deadlock scenario between lockdep and rcu when the
rcu nocb feature is enabled, as shown in the following call stacks:

     rcuop/x
-000|queued_spin_lock_slowpath(lock = 0xFFFFFF817F2A8A80, val = ?)
-001|queued_spin_lock(inline) // try to hold nocb_gp_lock
-001|do_raw_spin_lock(lock = 0xFFFFFF817F2A8A80)
-002|__raw_spin_lock_irqsave(inline)
-002|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F2A8A80)
-003|wake_nocb_gp_defer(inline)
-003|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F30B680)
-004|__call_rcu_common(inline)
-004|call_rcu(head = 0xFFFFFFC082EECC28, func = ?)
-005|call_rcu_zapped(inline)
-005|free_zapped_rcu(ch = ?) // hold graph lock
-006|rcu_do_batch(rdp = 0xFFFFFF817F245680)
-007|nocb_cb_wait(inline)
-007|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F245680)
-008|kthread(_create = 0xFFFFFF80803122C0)
-009|ret_from_fork(asm)

     rcuop/y
-000|queued_spin_lock_slowpath(lock = 0xFFFFFFC08291BBC8, val = 0)
-001|queued_spin_lock()
-001|lockdep_lock()
-001|graph_lock() // try to hold graph lock
-002|lookup_chain_cache_add()
-002|validate_chain()
-003|lock_acquire
-004|_raw_spin_lock_irqsave(lock = 0xFFFFFF817F211D80)
-005|lock_timer_base(inline)
-006|mod_timer(inline)
-006|wake_nocb_gp_defer(inline) // hold nocb_gp_lock
-006|__call_rcu_nocb_wake(rdp = 0xFFFFFF817F2A8680)
-007|__call_rcu_common(inline)
-007|call_rcu(head = 0xFFFFFFC0822E0B58, func = ?)
-008|call_rcu_hurry(inline)
-008|rcu_sync_call(inline)
-008|rcu_sync_func(rhp = 0xFFFFFFC0822E0B58)
-009|rcu_do_batch(rdp = 0xFFFFFF817F266680)
-010|nocb_cb_wait(inline)
-010|rcu_nocb_cb_kthread(arg = 0xFFFFFF817F266680)
-011|kthread(_create = 0xFFFFFF8080363740)
-012|ret_from_fork(asm)

rcuop/x and rcuop/y are rcu nocb threads that share the same nocb gp
thread: rcuop/x holds the graph lock while waiting for nocb_gp_lock,
and rcuop/y holds nocb_gp_lock while waiting for the graph lock.
This patch releases the graph lock before lockdep calls call_rcu().
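
The cycle is a classic ABBA inversion: rcuop/x takes the graph lock and
then wants nocb_gp_lock, while rcuop/y takes nocb_gp_lock and then wants
the graph lock. The minimal user-space sketch below is illustrative only
(pthread mutexes stand in for the kernel spinlocks, and the thread bodies
are hypothetical reductions of the stacks above); with unlucky timing it
hangs the same way:

  #include <pthread.h>
  #include <stdio.h>

  /* Stand-ins for lockdep's graph lock and rcu's nocb_gp_lock. */
  static pthread_mutex_t graph_lock   = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t nocb_gp_lock = PTHREAD_MUTEX_INITIALIZER;

  /* rcuop/x: free_zapped_rcu() runs with the graph lock held, then
   * call_rcu() -> wake_nocb_gp_defer() needs nocb_gp_lock. */
  static void *rcuop_x(void *unused)
  {
          pthread_mutex_lock(&graph_lock);        /* A */
          pthread_mutex_lock(&nocb_gp_lock);      /* then B: blocks */
          pthread_mutex_unlock(&nocb_gp_lock);
          pthread_mutex_unlock(&graph_lock);
          return NULL;
  }

  /* rcuop/y: wake_nocb_gp_defer() runs with nocb_gp_lock held, then
   * mod_timer() -> lock_acquire() -> validate_chain() needs the
   * graph lock. */
  static void *rcuop_y(void *unused)
  {
          pthread_mutex_lock(&nocb_gp_lock);      /* B */
          pthread_mutex_lock(&graph_lock);        /* then A: blocks */
          pthread_mutex_unlock(&graph_lock);
          pthread_mutex_unlock(&nocb_gp_lock);
          return NULL;
  }

  int main(void)
  {
          pthread_t x, y;

          pthread_create(&x, NULL, rcuop_x, NULL);
          pthread_create(&y, NULL, rcuop_y, NULL);
          pthread_join(x, NULL);  /* with unlucky timing, never returns */
          pthread_join(y, NULL);
          puts("no deadlock this run");
          return 0;
  }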

Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
Cc: [email protected]
Cc: Boqun Feng <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Carlos Llamas <[email protected]>
Cc: Bart Van Assche <[email protected]>
Signed-off-by: Zhiguo Niu <[email protected]>
Signed-off-by: Xuewen Yan <[email protected]>
Reviewed-by: Waiman Long <[email protected]>
Reviewed-by: Carlos Llamas <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Signed-off-by: Carlos Llamas <[email protected]>
Acked-by: Paul E. McKenney <[email protected]>
Signed-off-by: Boqun Feng <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
kernel/locking/lockdep.c

index 266f57f36f697d36da630842f877416a50d138da..b172ead28f1c501ba44df4221545b1298b09d417 100644 (file)
@@ -6186,25 +6186,27 @@ static struct pending_free *get_pending_free(void)
 static void free_zapped_rcu(struct rcu_head *cb);
 
 /*
- * Schedule an RCU callback if no RCU callback is pending. Must be called with
- * the graph lock held.
- */
-static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback. Must be called with
+ * the lockdep lock held. Returns false if either we don't have
+ * any pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+static bool prepare_call_rcu_zapped(struct pending_free *pf)
 {
        WARN_ON_ONCE(inside_selftest());
 
        if (list_empty(&pf->zapped))
-               return;
+               return false;
 
        if (delayed_free.scheduled)
-               return;
+               return false;
 
        delayed_free.scheduled = true;
 
        WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
        delayed_free.index ^= 1;
 
-       call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+       return true;
 }
 
 /* The caller must hold the graph lock. May be called from RCU context. */
@@ -6230,6 +6232,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 {
        struct pending_free *pf;
        unsigned long flags;
+       bool need_callback;
 
        if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
                return;
@@ -6241,14 +6244,18 @@ static void free_zapped_rcu(struct rcu_head *ch)
        pf = delayed_free.pf + (delayed_free.index ^ 1);
        __free_zapped_classes(pf);
        delayed_free.scheduled = false;
+       need_callback =
+               prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+       lockdep_unlock();
+       raw_local_irq_restore(flags);
 
        /*
-        * If there's anything on the open list, close and start a new callback.
-        */
-       call_rcu_zapped(delayed_free.pf + delayed_free.index);
+        * If there's a pending free and its callback has not been scheduled,
+        * queue an RCU callback.
+        */
+       if (need_callback)
+               call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 
-       lockdep_unlock();
-       raw_local_irq_restore(flags);
 }
 
 /*
@@ -6288,6 +6295,7 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
        struct pending_free *pf;
        unsigned long flags;
+       bool need_callback;
 
        init_data_structures_once();
 
@@ -6295,10 +6303,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
        lockdep_lock();
        pf = get_pending_free();
        __lockdep_free_key_range(pf, start, size);
-       call_rcu_zapped(pf);
+       need_callback = prepare_call_rcu_zapped(pf);
        lockdep_unlock();
        raw_local_irq_restore(flags);
-
+       if (need_callback)
+               call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
        /*
         * Wait for any possible iterators from look_up_lock_class() to pass
         * before continuing to free the memory they refer to.
@@ -6392,6 +6401,7 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
        struct pending_free *pf;
        unsigned long flags;
        int locked;
+       bool need_callback = false;
 
        raw_local_irq_save(flags);
        locked = graph_lock();
@@ -6400,11 +6410,13 @@ static void lockdep_reset_lock_reg(struct lockdep_map *lock)
 
        pf = get_pending_free();
        __lockdep_reset_lock(pf, lock);
-       call_rcu_zapped(pf);
+       need_callback = prepare_call_rcu_zapped(pf);
 
        graph_unlock();
 out_irq:
        raw_local_irq_restore(flags);
+       if (need_callback)
+               call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
 }
 
 /*
@@ -6448,6 +6460,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
        struct pending_free *pf;
        unsigned long flags;
        bool found = false;
+       bool need_callback = false;
 
        might_sleep();
 
@@ -6468,11 +6481,14 @@ void lockdep_unregister_key(struct lock_class_key *key)
        if (found) {
                pf = get_pending_free();
                __lockdep_free_key_range(pf, key, 1);
-               call_rcu_zapped(pf);
+               need_callback = prepare_call_rcu_zapped(pf);
        }
        lockdep_unlock();
        raw_local_irq_restore(flags);
 
+       if (need_callback)
+               call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+
        /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
        synchronize_rcu();
 }
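
With this change, every zap path decides under the graph lock whether an
RCU callback is needed and issues call_rcu() only after dropping the lock.
A condensed sketch of the resulting caller pattern follows (not a
compilable excerpt of lockdep.c; zap_foo() is a hypothetical stand-in for
the four patched call sites above):

  static void zap_foo(void)
  {
          struct pending_free *pf;
          unsigned long flags;
          bool need_callback;

          raw_local_irq_save(flags);
          lockdep_lock();
          pf = get_pending_free();
          /* ... move zapped classes onto the pending_free list ... */
          need_callback = prepare_call_rcu_zapped(pf);
          lockdep_unlock();
          raw_local_irq_restore(flags);

          /*
           * call_rcu() can reach wake_nocb_gp_defer() and take
           * nocb_gp_lock; issuing it only after the graph lock is
           * dropped breaks the ABBA cycle shown in the call stacks.
           */
          if (need_callback)
                  call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
  }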