Merge tag 'locking-urgent-2024-09-29' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <[email protected]>
Sun, 29 Sep 2024 15:51:30 +0000 (08:51 -0700)
committer Linus Torvalds <[email protected]>
Sun, 29 Sep 2024 15:51:30 +0000 (08:51 -0700)
Pull locking updates from Ingo Molnar:
 "lockdep:
    - Fix potential deadlock between lockdep and RCU (Zhiguo Niu)
    - Use str_plural() to address Coccinelle warning (Thorsten Blum)
    - Add debuggability enhancement (Luis Claudio R. Goncalves)

  static keys & calls:
    - Fix static_key_slow_dec() yet again (Peter Zijlstra)
    - Handle module init failure correctly in static_call_del_module()
      (Thomas Gleixner)
    - Replace pointless WARN_ON() in static_call_module_notify() (Thomas
      Gleixner)

  <linux/cleanup.h>:
    - Add usage and style documentation (Dan Williams)

  rwsems:
    - Move is_rwsem_reader_owned() and rwsem_owner() under
      CONFIG_DEBUG_RWSEMS (Waiman Long)

  atomic ops, x86:
    - Redeclare x86_32 arch_atomic64_{add,sub}() as void (Uros Bizjak)
    - Introduce the read64_nonatomic macro to x86_32 with cx8 (Uros
      Bizjak)"

Signed-off-by: Ingo Molnar <[email protected]>
* tag 'locking-urgent-2024-09-29' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Move is_rwsem_reader_owned() and rwsem_owner() under CONFIG_DEBUG_RWSEMS
  jump_label: Fix static_key_slow_dec() yet again
  static_call: Replace pointless WARN_ON() in static_call_module_notify()
  static_call: Handle module init failure correctly in static_call_del_module()
  locking/lockdep: Simplify character output in seq_line()
  lockdep: fix deadlock issue between lockdep and rcu
  lockdep: Use str_plural() to fix Coccinelle warning
  cleanup: Add usage and style documentation
  lockdep: suggest the fix for "lockdep bfs error:-1" on print_bfs_bug
  locking/atomic/x86: Redeclare x86_32 arch_atomic64_{add,sub}() as void
  locking/atomic/x86: Introduce the read64_nonatomic macro to x86_32 with cx8
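
As background for the str_plural() item above: str_plural(), from <linux/string_choices.h>, returns "s" for any count other than one, so callers can drop the open-coded ternary that Coccinelle flags. A minimal sketch of the conversion, using a made-up report_held_locks() helper rather than the real lockdep function:

#include <linux/printk.h>
#include <linux/string_choices.h>

/* Hypothetical caller; the actual change lands in lockdep_print_held_locks(). */
static void report_held_locks(int depth)
{
        /* str_plural(n) yields "" only for n == 1 and "s" otherwise. */
        pr_info("%d lock%s held\n", depth, str_plural(depth));
}

The form it replaces, `depth > 1 ? "s" : ""`, is visible in the kernel/locking/lockdep.c hunk further down.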

Documentation/core-api/index.rst
include/linux/cleanup.h
kernel/locking/lockdep.c
kernel/locking/rwsem.c

diff --combined Documentation/core-api/index.rst
index e18a2ffe07877d7a609a17531aa19466ea03f26f,b99d2fb3e2f14350095ce93eab5dd889f60cb4fa..a331d2c814f57d65313ef42897c21204b995e816
@@@ -35,6 -35,7 +35,7 @@@ Library functionality that is used thro
  
     kobject
     kref
+    cleanup
     assoc_array
     xarray
     maple_tree
@@@ -49,7 -50,6 +50,7 @@@
     wrappers/atomic_t
     wrappers/atomic_bitops
     floating-point
 +   union_find
  
  Low level entry and exit
  ========================
diff --combined include/linux/cleanup.h
index a3d3e888cf1f3b90adf02d16b67fa0aee1e53711,9c6b4f2c017659a89c0a126e815bfe5c59bd08f3..038b2d523bf884e969e67e3ab038ed5f7d6657da
  
  #include <linux/compiler.h>
  
+ /**
+  * DOC: scope-based cleanup helpers
+  *
+  * The "goto error" pattern is notorious for introducing subtle resource
+  * leaks. It is tedious and error prone to add new resource acquisition
+  * constraints into code paths that already have several unwind
+  * conditions. The "cleanup" helpers enable the compiler to help with
+  * this tedium and can aid in maintaining LIFO (last in first out)
+  * unwind ordering to avoid unintentional leaks.
+  *
+  * As drivers make up the majority of the kernel code base, here is an
+  * example of using these helpers to clean up PCI drivers. The target of
+  * the cleanups are occasions where a goto is used to unwind a device
+  * reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
+  * before returning.
+  *
+  * The DEFINE_FREE() macro can arrange for PCI device references to be
+  * dropped when the associated variable goes out of scope::
+  *
+  *    DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
+  *    ...
+  *    struct pci_dev *dev __free(pci_dev_put) =
+  *            pci_get_slot(parent, PCI_DEVFN(0, 0));
+  *
+  * The above will automatically call pci_dev_put() if @dev is non-NULL
+  * when @dev goes out of scope (automatic variable scope). If a function
+  * wants to invoke pci_dev_put() on error, but return @dev (i.e. without
+  * freeing it) on success, it can do::
+  *
+  *    return no_free_ptr(dev);
+  *
+  * ...or::
+  *
+  *    return_ptr(dev);
+  *
+  * The DEFINE_GUARD() macro can arrange for the PCI device lock to be
+  * dropped when the scope where guard() is invoked ends::
+  *
+  *    DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
+  *    ...
+  *    guard(pci_dev)(dev);
+  *
+  * The lifetime of the lock obtained by the guard() helper follows the
+  * scope of automatic variable declaration. Take the following example::
+  *
+  *    func(...)
+  *    {
+  *            if (...) {
+  *                    ...
+  *                    guard(pci_dev)(dev); // pci_dev_lock() invoked here
+  *                    ...
+  *            } // <- implied pci_dev_unlock() triggered here
+  *    }
+  *
+  * Observe the lock is held for the remainder of the "if ()" block not
+  * the remainder of "func()".
+  *
+  * Now, when a function uses both __free() and guard(), or multiple
+  * instances of __free(), the LIFO order of variable definition order
+  * matters. GCC documentation says:
+  *
+  * "When multiple variables in the same scope have cleanup attributes,
+  * at exit from the scope their associated cleanup functions are run in
+  * reverse order of definition (last defined, first cleanup)."
+  *
+  * When the unwind order matters it requires that variables be defined
+  * mid-function scope rather than at the top of the file.  Take the
+  * following example and notice the bug highlighted by "!!"::
+  *
+  *    LIST_HEAD(list);
+  *    DEFINE_MUTEX(lock);
+  *
+  *    struct object {
+  *            struct list_head node;
+  *    };
+  *
+  *    static struct object *alloc_add(void)
+  *    {
+  *            struct object *obj;
+  *
+  *            lockdep_assert_held(&lock);
+  *            obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+  *            if (obj) {
+  *                    INIT_LIST_HEAD(&obj->node);
+  *                    list_add(&obj->node, &list);
+  *            }
+  *            return obj;
+  *    }
+  *
+  *    static void remove_free(struct object *obj)
+  *    {
+  *            lockdep_assert_held(&lock);
+  *            list_del(&obj->node);
+  *            kfree(obj);
+  *    }
+  *
+  *    DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
+  *    static int init(void)
+  *    {
+  *            struct object *obj __free(remove_free) = NULL;
+  *            int err;
+  *
+  *            guard(mutex)(&lock);
+  *            obj = alloc_add();
+  *
+  *            if (!obj)
+  *                    return -ENOMEM;
+  *
+  *            err = other_init(obj);
+  *            if (err)
+  *                    return err; // remove_free() called without the lock!!
+  *
+  *            no_free_ptr(obj);
+  *            return 0;
+  *    }
+  *
+  * That bug is fixed by changing init() to call guard() and define +
+  * initialize @obj in this order::
+  *
+  *    guard(mutex)(&lock);
+  *    struct object *obj __free(remove_free) = alloc_add();
+  *
+  * Given that the "__free(...) = NULL" pattern for variables defined at
+  * the top of the function poses this potential interdependency problem
+  * the recommendation is to always define and assign variables in one
+  * statement and not group variable definitions at the top of the
+  * function when __free() is used.
+  *
+  * Lastly, given that the benefit of cleanup helpers is removal of
+  * "goto", and that the "goto" statement can jump between scopes, the
+  * expectation is that usage of "goto" and cleanup helpers is never
+  * mixed in the same function. I.e. for a given routine, convert all
+  * resources that need a "goto" cleanup to scope-based cleanup, or
+  * convert none of them.
+  */
  /*
   * DEFINE_FREE(name, type, free):
   *    simple helper macro that defines the required wrapper for a __free()
@@@ -98,7 -234,7 +234,7 @@@ const volatile void * __must_check_fn(c
   * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
   *
   *    CLASS(fdget, f)(fd);
 - *    if (!f.file)
 + *    if (!fd_file(f))
   *            return -EBADF;
   *
   *    // use 'f' without concern
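
Before the lockdep.c changes below, here is a compact sketch of the two cleanup.h helpers documented in the hunk above. Everything named foo (struct foo, foo_put(), foo_lock(), foo_unlock(), foo_init_one()) is invented purely to show the shape of DEFINE_FREE()/__free() and DEFINE_GUARD()/guard(); none of it is an API touched by this commit:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct foo { int id; };

static void foo_put(struct foo *f) { kfree(f); }
static void foo_lock(struct foo *f) { /* take a per-object lock */ }
static void foo_unlock(struct foo *f) { /* release it */ }

/* Drop the object when an __free(foo_put) variable goes out of scope. */
DEFINE_FREE(foo_put, struct foo *, if (_T) foo_put(_T))
/* Take the lock at guard() time, release it when the enclosing scope ends. */
DEFINE_GUARD(foo_guard, struct foo *, foo_lock(_T), foo_unlock(_T))

static int foo_init_one(int id)
{
        struct foo *f __free(foo_put) = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return -ENOMEM;

        guard(foo_guard)(f);    /* declared after f: unlock runs before foo_put() */
        f->id = id;
        return 0;               /* f is unlocked, then freed; no_free_ptr(f) would keep it */
}

The guard is deliberately declared after f here, so LIFO unwind releases the per-object lock before the object is freed. The documentation above wants the opposite order for its mutex example, which is exactly why that example defines the guard first and the __free() variable second.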
diff --combined kernel/locking/lockdep.c
index 7963deac33c31e763bb51e8d2c02266e2c3cce21,364ae0b55beeaedbdaf61e7cd58d281d3b33f061..536bd471557f5b4412d6babc480d0832b4ee9d51
@@@ -56,7 -56,6 +56,7 @@@
  #include <linux/kprobes.h>
  #include <linux/lockdep.h>
  #include <linux/context_tracking.h>
 +#include <linux/console.h>
  
  #include <asm/sections.h>
  
@@@ -574,10 -573,8 +574,10 @@@ static struct lock_trace *save_trace(vo
                if (!debug_locks_off_graph_unlock())
                        return NULL;
  
 +              nbcon_cpu_emergency_enter();
                print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
  
                return NULL;
        }
@@@ -788,7 -785,7 +788,7 @@@ static void lockdep_print_held_locks(st
                printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
        else
                printk("%d lock%s held by %s/%d:\n", depth,
-                      depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
+                      str_plural(depth), p->comm, task_pid_nr(p));
        /*
         * It's not reliable to print a task's held locks if it's not sleeping
         * and it's not the current task.
@@@ -890,13 -887,11 +890,13 @@@ look_up_lock_class(const struct lockdep
        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
                instrumentation_begin();
                debug_locks_off();
 +              nbcon_cpu_emergency_enter();
                printk(KERN_ERR
                        "BUG: looking up invalid subclass: %u\n", subclass);
                printk(KERN_ERR
                        "turning off the locking correctness validator.\n");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
                instrumentation_end();
                return NULL;
        }
@@@ -973,13 -968,11 +973,13 @@@ static bool assign_lock_key(struct lock
        else {
                /* Debug-check: all keys must be persistent! */
                debug_locks_off();
 +              nbcon_cpu_emergency_enter();
                pr_err("INFO: trying to register non-static key.\n");
                pr_err("The code is fine but needs lockdep annotation, or maybe\n");
                pr_err("you didn't initialize this object before use?\n");
                pr_err("turning off the locking correctness validator.\n");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
                return false;
        }
  
@@@ -1323,10 -1316,8 +1323,10 @@@ register_lock_class(struct lockdep_map 
                        return NULL;
                }
  
 +              nbcon_cpu_emergency_enter();
                print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
                return NULL;
        }
        nr_lock_classes++;
        if (verbose(class)) {
                graph_unlock();
  
 +              nbcon_cpu_emergency_enter();
                printk("\nnew class %px: %s", class->key, class->name);
                if (class->name_version > 1)
                        printk(KERN_CONT "#%d", class->name_version);
                printk(KERN_CONT "\n");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
  
                if (!graph_lock()) {
                        return NULL;
@@@ -1403,10 -1392,8 +1403,10 @@@ static struct lock_list *alloc_list_ent
                if (!debug_locks_off_graph_unlock())
                        return NULL;
  
 +              nbcon_cpu_emergency_enter();
                print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
                return NULL;
        }
        nr_list_entries++;
@@@ -2052,8 -2039,6 +2052,8 @@@ static noinline void print_circular_bug
  
        depth = get_lock_depth(target);
  
 +      nbcon_cpu_emergency_enter();
 +
        print_circular_bug_header(target, depth, check_src, check_tgt);
  
        parent = get_lock_parent(target);
  
        printk("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  static noinline void print_bfs_bug(int ret)
        /*
         * Breadth-first-search failed, graph got corrupted?
         */
+       if (ret == BFS_EQUEUEFULL)
+               pr_warn("Increase LOCKDEP_CIRCULAR_QUEUE_BITS to avoid this warning:\n");
        WARN(1, "lockdep bfs error:%d\n", ret);
  }
  
@@@ -2586,8 -2572,6 +2589,8 @@@ print_bad_irq_dependency(struct task_st
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("=====================================================\n");
        pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
        pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
        next_root->trace = save_trace();
        if (!next_root->trace)
 -              return;
 +              goto out;
        print_shortest_lock_dependencies(forwards_entry, next_root);
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +out:
 +      nbcon_cpu_emergency_exit();
  }
  
  static const char *state_names[] = {
@@@ -3008,8 -2990,6 +3011,8 @@@ print_deadlock_bug(struct task_struct *
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("============================================\n");
        pr_warn("WARNING: possible recursive locking detected\n");
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  /*
@@@ -3631,8 -3609,6 +3634,8 @@@ static void print_collision(struct task
                        struct held_lock *hlock_next,
                        struct lock_chain *chain)
  {
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("============================\n");
        pr_warn("WARNING: chain_key collision\n");
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  #endif
  
@@@ -3741,10 -3715,8 +3744,10 @@@ static inline int add_chain_cache(struc
                if (!debug_locks_off_graph_unlock())
                        return 0;
  
 +              nbcon_cpu_emergency_enter();
                print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
                return 0;
        }
        chain->chain_key = chain_key;
                if (!debug_locks_off_graph_unlock())
                        return 0;
  
 +              nbcon_cpu_emergency_enter();
                print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
                return 0;
        }
  
@@@ -4003,8 -3973,6 +4006,8 @@@ print_usage_bug(struct task_struct *cur
        if (!debug_locks_off() || debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("================================\n");
        pr_warn("WARNING: inconsistent lock state\n");
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  /*
@@@ -4069,8 -4035,6 +4072,8 @@@ print_irq_inversion_bug(struct task_str
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("========================================================\n");
        pr_warn("WARNING: possible irq lock inversion dependency detected\n");
        pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
        root->trace = save_trace();
        if (!root->trace)
 -              return;
 +              goto out;
        print_shortest_lock_dependencies(other, root);
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +out:
 +      nbcon_cpu_emergency_exit();
  }
  
  /*
@@@ -4194,8 -4156,6 +4197,8 @@@ void print_irqtrace_events(struct task_
  {
        const struct irqtrace_events *trace = &curr->irqtrace;
  
 +      nbcon_cpu_emergency_enter();
 +
        printk("irq event stamp: %u\n", trace->irq_events);
        printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
                trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
        printk("softirqs last disabled at (%u): [<%px>] %pS\n",
                trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
                (void *)trace->softirq_disable_ip);
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  static int HARDIRQ_verbose(struct lock_class *class)
@@@ -4731,12 -4689,10 +4734,12 @@@ unlock
         * We must printk outside of the graph_lock:
         */
        if (ret == 2) {
 +              nbcon_cpu_emergency_enter();
                printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
                print_lock(this);
                print_irqtrace_events(curr);
                dump_stack();
 +              nbcon_cpu_emergency_exit();
        }
  
        return ret;
@@@ -4777,8 -4733,6 +4780,8 @@@ print_lock_invalid_wait_context(struct 
        if (debug_locks_silent)
                return 0;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("=============================\n");
        pr_warn("[ BUG: Invalid wait context ]\n");
        pr_warn("stack backtrace:\n");
        dump_stack();
  
 +      nbcon_cpu_emergency_exit();
 +
        return 0;
  }
  
@@@ -5007,8 -4959,6 +5010,8 @@@ print_lock_nested_lock_not_held(struct 
        if (debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("==================================\n");
        pr_warn("WARNING: Nested lock was not taken\n");
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  static int __lock_is_held(const struct lockdep_map *lock, int read);
@@@ -5079,13 -5027,11 +5082,13 @@@ static int __lock_acquire(struct lockde
        debug_class_ops_inc(class);
  
        if (very_verbose(class)) {
 +              nbcon_cpu_emergency_enter();
                printk("\nacquire class [%px] %s", class->key, class->name);
                if (class->name_version > 1)
                        printk(KERN_CONT "#%d", class->name_version);
                printk(KERN_CONT "\n");
                dump_stack();
 +              nbcon_cpu_emergency_exit();
        }
  
        /*
  #endif
        if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
                debug_locks_off();
 +              nbcon_cpu_emergency_enter();
                print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
                printk(KERN_DEBUG "depth: %i  max: %lu!\n",
                       curr->lockdep_depth, MAX_LOCK_DEPTH);
                lockdep_print_held_locks(current);
                debug_show_all_locks();
                dump_stack();
 +              nbcon_cpu_emergency_exit();
  
                return 0;
        }
@@@ -5240,8 -5184,6 +5243,8 @@@ static void print_unlock_imbalance_bug(
        if (debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("=====================================\n");
        pr_warn("WARNING: bad unlock balance detected!\n");
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  static noinstr int match_held_lock(const struct held_lock *hlock,
@@@ -5964,8 -5904,6 +5967,8 @@@ static void print_lock_contention_bug(s
        if (debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("=================================\n");
        pr_warn("WARNING: bad contention detected!\n");
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  static void
@@@ -6263,25 -6199,27 +6266,27 @@@ static struct pending_free *get_pending
  static void free_zapped_rcu(struct rcu_head *cb);
  
  /*
-  * Schedule an RCU callback if no RCU callback is pending. Must be called with
-  * the graph lock held.
-  */
- static void call_rcu_zapped(struct pending_free *pf)
+ * See if we need to queue an RCU callback; must be called with
+ * the lockdep lock held. Returns false if either we don't have
+ * any pending free or the callback is already scheduled.
+ * Otherwise, a call_rcu() must follow this function call.
+ */
+ static bool prepare_call_rcu_zapped(struct pending_free *pf)
  {
        WARN_ON_ONCE(inside_selftest());
  
        if (list_empty(&pf->zapped))
-               return;
+               return false;
  
        if (delayed_free.scheduled)
-               return;
+               return false;
  
        delayed_free.scheduled = true;
  
        WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
        delayed_free.index ^= 1;
  
-       call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
+       return true;
  }
  
  /* The caller must hold the graph lock. May be called from RCU context. */
@@@ -6307,6 -6245,7 +6312,7 @@@ static void free_zapped_rcu(struct rcu_
  {
        struct pending_free *pf;
        unsigned long flags;
+       bool need_callback;
  
        if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
                return;
        pf = delayed_free.pf + (delayed_free.index ^ 1);
        __free_zapped_classes(pf);
        delayed_free.scheduled = false;
+       need_callback =
+               prepare_call_rcu_zapped(delayed_free.pf + delayed_free.index);
+       lockdep_unlock();
+       raw_local_irq_restore(flags);
  
        /*
-        * If there's anything on the open list, close and start a new callback.
-        */
-       call_rcu_zapped(delayed_free.pf + delayed_free.index);
+       * If there's pending free and its callback has not been scheduled,
+       * queue an RCU callback.
+       */
+       if (need_callback)
+               call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
  
-       lockdep_unlock();
-       raw_local_irq_restore(flags);
  }
  
  /*
@@@ -6365,6 -6308,7 +6375,7 @@@ static void lockdep_free_key_range_reg(
  {
        struct pending_free *pf;
        unsigned long flags;
+       bool need_callback;
  
        init_data_structures_once();
  
        lockdep_lock();
        pf = get_pending_free();
        __lockdep_free_key_range(pf, start, size);
-       call_rcu_zapped(pf);
+       need_callback = prepare_call_rcu_zapped(pf);
        lockdep_unlock();
        raw_local_irq_restore(flags);
+       if (need_callback)
+               call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
        /*
         * Wait for any possible iterators from look_up_lock_class() to pass
         * before continuing to free the memory they refer to.
@@@ -6469,6 -6414,7 +6481,7 @@@ static void lockdep_reset_lock_reg(stru
        struct pending_free *pf;
        unsigned long flags;
        int locked;
+       bool need_callback = false;
  
        raw_local_irq_save(flags);
        locked = graph_lock();
  
        pf = get_pending_free();
        __lockdep_reset_lock(pf, lock);
-       call_rcu_zapped(pf);
+       need_callback = prepare_call_rcu_zapped(pf);
  
        graph_unlock();
  out_irq:
        raw_local_irq_restore(flags);
+       if (need_callback)
+               call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
  }
  
  /*
@@@ -6525,6 -6473,7 +6540,7 @@@ void lockdep_unregister_key(struct lock
        struct pending_free *pf;
        unsigned long flags;
        bool found = false;
+       bool need_callback = false;
  
        might_sleep();
  
        if (found) {
                pf = get_pending_free();
                __lockdep_free_key_range(pf, key, 1);
-               call_rcu_zapped(pf);
+               need_callback = prepare_call_rcu_zapped(pf);
        }
        lockdep_unlock();
        raw_local_irq_restore(flags);
  
+       if (need_callback)
+               call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
        /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
        synchronize_rcu();
  }
@@@ -6603,8 -6555,6 +6622,8 @@@ print_freed_lock_bug(struct task_struc
        if (debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("=========================\n");
        pr_warn("WARNING: held lock freed!\n");
  
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  static inline int not_in_range(const void* mem_from, unsigned long mem_len,
@@@ -6665,8 -6613,6 +6684,8 @@@ static void print_held_locks_bug(void
        if (debug_locks_silent)
                return;
  
 +      nbcon_cpu_emergency_enter();
 +
        pr_warn("\n");
        pr_warn("====================================\n");
        pr_warn("WARNING: %s/%d still has locks held!\n",
        lockdep_print_held_locks(current);
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +
 +      nbcon_cpu_emergency_exit();
  }
  
  void debug_check_no_locks_held(void)
@@@ -6735,7 -6679,6 +6754,7 @@@ asmlinkage __visible void lockdep_sys_e
        if (unlikely(curr->lockdep_depth)) {
                if (!debug_locks_off())
                        return;
 +              nbcon_cpu_emergency_enter();
                pr_warn("\n");
                pr_warn("================================================\n");
                pr_warn("WARNING: lock held when returning to user space!\n");
                pr_warn("%s/%d is leaving the kernel with locks still held!\n",
                                curr->comm, curr->pid);
                lockdep_print_held_locks(curr);
 +              nbcon_cpu_emergency_exit();
        }
  
        /*
@@@ -6761,7 -6703,6 +6780,7 @@@ void lockdep_rcu_suspicious(const char 
        bool rcu = warn_rcu_enter();
  
        /* Note: the following can be executed concurrently, so be careful. */
 +      nbcon_cpu_emergency_enter();
        pr_warn("\n");
        pr_warn("=============================\n");
        pr_warn("WARNING: suspicious RCU usage\n");
        lockdep_print_held_locks(curr);
        pr_warn("\nstack backtrace:\n");
        dump_stack();
 +      nbcon_cpu_emergency_exit();
        warn_rcu_exit(rcu);
  }
  EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
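
The lockdep.c changes above combine two themes: the splat printers are wrapped in nbcon_cpu_emergency_enter()/nbcon_cpu_emergency_exit() sections, and the zapped-class reclaim is reworked so call_rcu() is no longer issued while the lockdep graph lock is held. prepare_call_rcu_zapped() now only decides whether a callback is needed, and each caller invokes call_rcu() after unlocking, which is what resolves the lockdep/RCU deadlock named in the shortlog. A generic sketch of that "decide under the lock, act after dropping it" shape, using made-up names (state_lock, prepare_zap_callback(), zap_and_schedule()) rather than the lockdep internals:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(state_lock);
static struct rcu_head zap_rcu_head;
static bool callback_scheduled;
static bool zap_pending;

static void zap_rcu(struct rcu_head *rh)
{
        /* Reclaim the deferred state, then clear the flags under state_lock. */
}

/* Called with state_lock held: only decide, never call call_rcu() here. */
static bool prepare_zap_callback(void)
{
        if (!zap_pending || callback_scheduled)
                return false;
        callback_scheduled = true;
        return true;
}

static void zap_and_schedule(void)
{
        unsigned long flags;
        bool need_callback;

        raw_spin_lock_irqsave(&state_lock, flags);
        zap_pending = true;                     /* queue some deferred work */
        need_callback = prepare_zap_callback();
        raw_spin_unlock_irqrestore(&state_lock, flags);

        /*
         * call_rcu() can take RCU-internal locks of its own, so it is only
         * invoked once our lock has been dropped, mirroring the fix above.
         */
        if (need_callback)
                call_rcu(&zap_rcu_head, zap_rcu);
}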
diff --combined kernel/locking/rwsem.c
index 5ded7dff46efadcbebc34ea52fe308a40da5af3e,4b041e9c408fee2e0e48417eaa6cee6cbd3af301..2bbb6eca51445bdf434ba579ced4beddafbc52ca
@@@ -181,12 -181,21 +181,21 @@@ static inline void rwsem_set_reader_own
        __rwsem_set_reader_owned(sem, current);
  }
  
+ #ifdef CONFIG_DEBUG_RWSEMS
+ /*
+  * Return just the real task structure pointer of the owner
+  */
+ static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
+ {
+       return (struct task_struct *)
+               (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
+ }
  /*
   * Return true if the rwsem is owned by a reader.
   */
  static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
  {
- #ifdef CONFIG_DEBUG_RWSEMS
        /*
         * Check the count to see if it is write-locked.
         */
  
        if (count & RWSEM_WRITER_MASK)
                return false;
- #endif
        return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
  }
  
- #ifdef CONFIG_DEBUG_RWSEMS
  /*
   * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
   * is a task pointer in owner of a reader-owned rwsem, it will be the
@@@ -265,15 -272,6 +272,6 @@@ static inline bool rwsem_write_trylock(
        return false;
  }
  
- /*
-  * Return just the real task structure pointer of the owner
-  */
- static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
- {
-       return (struct task_struct *)
-               (atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
- }
  /*
   * Return the real task structure pointer of the owner and the embedded
   * flags in the owner. pflags must be non-NULL.
@@@ -631,7 -629,7 +629,7 @@@ static inline bool rwsem_try_write_lock
                         * if it is an RT task or wait in the wait queue
                         * for too long.
                         */
 -                      if (has_handoff || (!rt_task(waiter->task) &&
 +                      if (has_handoff || (!rt_or_dl_task(waiter->task) &&
                                            !time_after(jiffies, waiter->timeout)))
                                return false;
  
@@@ -914,7 -912,7 +912,7 @@@ static bool rwsem_optimistic_spin(struc
                if (owner_state != OWNER_WRITER) {
                        if (need_resched())
                                break;
 -                      if (rt_task(current) &&
 +                      if (rt_or_dl_task(current) &&
                           (prev_owner_state != OWNER_WRITER))
                                break;
                }
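
Finally, the rwsem.c hunks above gather rwsem_owner() and is_rwsem_reader_owned() under a single CONFIG_DEBUG_RWSEMS block instead of compiling them (or parts of them) unconditionally, presumably so non-debug builds neither build nor warn about helpers only the debug checks use. A small sketch of that grouping pattern with an invented lock type (struct mylock, CONFIG_DEBUG_MYLOCK and the helpers below are illustrative only, not kernel API):

#include <linux/atomic.h>
#include <linux/sched.h>

/* Invented lock type, used only to illustrate the #ifdef grouping. */
struct mylock {
        atomic_long_t count;
        atomic_long_t owner;
};

#define MYLOCK_WRITER_MASK       (1UL << 0)
#define MYLOCK_READER_OWNED      (1UL << 1)
#define MYLOCK_OWNER_FLAGS_MASK  (7UL)

#ifdef CONFIG_DEBUG_MYLOCK
/*
 * Debug-only helpers live together in one block: non-debug builds do not
 * compile them and cannot emit "defined but not used" warnings for them.
 */
static inline struct task_struct *mylock_owner(struct mylock *lock)
{
        return (struct task_struct *)
                (atomic_long_read(&lock->owner) & ~MYLOCK_OWNER_FLAGS_MASK);
}

static inline bool is_mylock_reader_owned(struct mylock *lock)
{
        if (atomic_long_read(&lock->count) & MYLOCK_WRITER_MASK)
                return false;
        return atomic_long_read(&lock->owner) & MYLOCK_READER_OWNED;
}
#endif /* CONFIG_DEBUG_MYLOCK */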