Git Repo - linux.git/commitdiff
Merge branch 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <[email protected]>
Mon, 6 May 2019 20:11:48 +0000 (13:11 -0700)
committer Linus Torvalds <[email protected]>
Mon, 6 May 2019 20:11:48 +0000 (13:11 -0700)
Pull stack trace updates from Ingo Molnar:
 "So Thomas looked at the stacktrace code recently and noticed a few
  weirdnesses, and we all know how such stories of crummy kernel code
  meeting German engineering perfection end: a 45-patch series to clean
  it all up! :-)

  Here are the changes, in Thomas's words:

   'Struct stack_trace is a sinkhole for input and output parameters
    which is largely pointless for most usage sites. In fact if embedded
    into other data structures it creates indirections and extra storage
    overhead for no benefit.

    Looking at all usage sites makes it clear that they just require an
    interface which is based on a storage array. That array is either on
    stack, global or embedded into some other data structure.

    Some of the stack depot usage sites are outright wrong, but
    fortunately the wrongness just causes more stack to be used for
    nothing and does not have functional impact.

    Another oddity is the inconsistent termination of the stack trace
    with ULONG_MAX. It's pointless as the number of entries is what
    determines the length of the stored trace. In fact quite a few call
    sites remove the ULONG_MAX marker afterwards, with or without nasty
    comments about it. Not all architectures do that, and those which do
    do it inconsistently, either conditional on nr_entries == 0 or
    unconditionally.

    The following series cleans that up by:

      1) Removing the ULONG_MAX termination in the architecture code

      2) Removing the ULONG_MAX fixups at the call sites

      3) Providing plain storage array based interfaces for stacktrace
         and stackdepot.

      4) Cleaning up the mess at the callsites including some related
         cleanups.

      5) Removing the struct stack_trace based interfaces

    This is not changing the struct stack_trace interfaces at the
    architecture level, but it removes the exposure to the generic
    code'"

* 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  x86/stacktrace: Use common infrastructure
  stacktrace: Provide common infrastructure
  lib/stackdepot: Remove obsolete functions
  stacktrace: Remove obsolete functions
  livepatch: Simplify stack trace retrieval
  tracing: Remove the last struct stack_trace usage
  tracing: Simplify stack trace retrieval
  tracing: Make ftrace_trace_userstack() static and conditional
  tracing: Use percpu stack trace buffer more intelligently
  tracing: Simplify stacktrace retrieval in histograms
  lockdep: Simplify stack trace handling
  lockdep: Remove save argument from check_prev_add()
  lockdep: Remove unused trace argument from print_circular_bug()
  drm: Simplify stacktrace handling
  dm persistent data: Simplify stack trace handling
  dm bufio: Simplify stack trace retrieval
  btrfs: ref-verify: Simplify stack trace retrieval
  dma/debug: Simplify stacktrace retrieval
  fault-inject: Simplify stacktrace retrieval
  mm/page_owner: Simplify stack trace handling
  ...

arch/x86/Kconfig
kernel/locking/lockdep.c
kernel/trace/trace.c
mm/kasan/common.c
mm/kasan/report.c
mm/kmemleak.c

diff --combined arch/x86/Kconfig
index 406a0cf30c576e45c283374e5c47bb844f04230b,b5978e35a8a826de55540116f22391f16e5125c5..f1162df4a805d8d2079451545d56e4efa95c892a
@@@ -74,6 -74,7 +74,7 @@@ config X8
        select ARCH_MIGHT_HAVE_ACPI_PDC         if ACPI
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
+       select ARCH_STACKWALK
        select ARCH_SUPPORTS_ACPI
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_NUMA_BALANCING     if X86_64
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_RCU_TABLE_FREE              if PARAVIRT
 -      select HAVE_RCU_TABLE_INVALIDATE        if HAVE_RCU_TABLE_FREE
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_RELIABLE_STACKTRACE         if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
        select HAVE_FUNCTION_ARG_ACCESS_API
@@@ -1498,7 -1500,7 +1499,7 @@@ config X86_CPA_STATISTIC
        depends on DEBUG_FS
        ---help---
          Expose statistics about the Change Page Attribute mechanims, which
 -        helps to determine the effectivness of preserving large and huge
 +        helps to determine the effectiveness of preserving large and huge
          page mappings when mapping protections are changed.
  
  config ARCH_HAS_MEM_ENCRYPT
diff --combined kernel/locking/lockdep.c
index e221be724fe82f0dbf7dc0426041b3ab6e5f01cf,45bcaf2e4cb6b2db760b1ac26e948fd099feb8d2..91c6b89f04df32729abad58116be87e419a1bc1a
@@@ -434,29 -434,14 +434,14 @@@ static void print_lockdep_off(const cha
  #endif
  }
  
- static int save_trace(struct stack_trace *trace)
+ static int save_trace(struct lock_trace *trace)
  {
-       trace->nr_entries = 0;
-       trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
-       trace->entries = stack_trace + nr_stack_trace_entries;
-       trace->skip = 3;
-       save_stack_trace(trace);
-       /*
-        * Some daft arches put -1 at the end to indicate its a full trace.
-        *
-        * <rant> this is buggy anyway, since it takes a whole extra entry so a
-        * complete trace that maxes out the entries provided will be reported
-        * as incomplete, friggin useless </rant>
-        */
-       if (trace->nr_entries != 0 &&
-           trace->entries[trace->nr_entries-1] == ULONG_MAX)
-               trace->nr_entries--;
-       trace->max_entries = trace->nr_entries;
+       unsigned long *entries = stack_trace + nr_stack_trace_entries;
+       unsigned int max_entries;
  
+       trace->offset = nr_stack_trace_entries;
+       max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
+       trace->nr_entries = stack_trace_save(entries, max_entries, 3);
        nr_stack_trace_entries += trace->nr_entries;
  
        if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
@@@ -1207,7 -1192,7 +1192,7 @@@ static struct lock_list *alloc_list_ent
  static int add_lock_to_list(struct lock_class *this,
                            struct lock_class *links_to, struct list_head *head,
                            unsigned long ip, int distance,
-                           struct stack_trace *trace)
+                           struct lock_trace *trace)
  {
        struct lock_list *entry;
        /*
@@@ -1426,6 -1411,13 +1411,13 @@@ static inline int __bfs_backwards(struc
   * checking.
   */
  
+ static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+ {
+       unsigned long *entries = stack_trace + trace->offset;
+       stack_trace_print(entries, trace->nr_entries, spaces);
+ }
  /*
   * Print a dependency chain entry (this is only done when a deadlock
   * has been detected):
@@@ -1438,8 -1430,7 +1430,7 @@@ print_circular_bug_entry(struct lock_li
        printk("\n-> #%u", depth);
        print_lock_name(target->class);
        printk(KERN_CONT ":\n");
-       print_stack_trace(&target->trace, 6);
+       print_lock_trace(&target->trace, 6);
        return 0;
  }
  
@@@ -1533,10 -1524,9 +1524,9 @@@ static inline int class_equal(struct lo
  }
  
  static noinline int print_circular_bug(struct lock_list *this,
-                               struct lock_list *target,
-                               struct held_lock *check_src,
-                               struct held_lock *check_tgt,
-                               struct stack_trace *trace)
+                                      struct lock_list *target,
+                                      struct held_lock *check_src,
+                                      struct held_lock *check_tgt)
  {
        struct task_struct *curr = current;
        struct lock_list *parent;
@@@ -1752,7 -1742,7 +1742,7 @@@ static void print_lock_class_header(str
  
                        len += printk("%*s   %s", depth, "", usage_str[bit]);
                        len += printk(KERN_CONT " at:\n");
-                       print_stack_trace(class->usage_traces + bit, len);
+                       print_lock_trace(class->usage_traces + bit, len);
                }
        }
        printk("%*s }\n", depth, "");
@@@ -1777,7 -1767,7 +1767,7 @@@ print_shortest_lock_dependencies(struc
        do {
                print_lock_class_header(entry->class, depth);
                printk("%*s ... acquired at:\n", depth, "");
-               print_stack_trace(&entry->trace, 2);
+               print_lock_trace(&entry->trace, 2);
                printk("\n");
  
                if (depth == 0 && (entry != root)) {
@@@ -1890,14 -1880,14 +1880,14 @@@ print_bad_irq_dependency(struct task_st
        print_lock_name(backwards_entry->class);
        pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
  
-       print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+       print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
  
        pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
        print_lock_name(forwards_entry->class);
        pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
        pr_warn("...");
  
-       print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+       print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
  
        pr_warn("\nother info that might help us debug this:\n\n");
        print_irq_lock_scenario(backwards_entry, forwards_entry,
@@@ -2170,8 -2160,7 +2160,7 @@@ check_deadlock(struct task_struct *curr
   */
  static int
  check_prev_add(struct task_struct *curr, struct held_lock *prev,
-              struct held_lock *next, int distance, struct stack_trace *trace,
-              int (*save)(struct stack_trace *trace))
+              struct held_lock *next, int distance, struct lock_trace *trace)
  {
        struct lock_list *uninitialized_var(target_entry);
        struct lock_list *entry;
        this.parent = NULL;
        ret = check_noncircular(&this, hlock_class(prev), &target_entry);
        if (unlikely(!ret)) {
-               if (!trace->entries) {
+               if (!trace->nr_entries) {
                        /*
-                        * If @save fails here, the printing might trigger
-                        * a WARN but because of the !nr_entries it should
-                        * not do bad things.
+                        * If save_trace fails here, the printing might
+                        * trigger a WARN but because of the !nr_entries it
+                        * should not do bad things.
                         */
-                       save(trace);
+                       save_trace(trace);
                }
-               return print_circular_bug(&this, target_entry, next, prev, trace);
+               return print_circular_bug(&this, target_entry, next, prev);
        }
        else if (unlikely(ret < 0))
                return print_bfs_bug(ret);
                return print_bfs_bug(ret);
  
  
-       if (!trace->entries && !save(trace))
+       if (!trace->nr_entries && !save_trace(trace))
                return 0;
  
        /*
  static int
  check_prevs_add(struct task_struct *curr, struct held_lock *next)
  {
+       struct lock_trace trace = { .nr_entries = 0 };
        int depth = curr->lockdep_depth;
        struct held_lock *hlock;
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .max_entries = 0,
-               .entries = NULL,
-               .skip = 0,
-       };
  
        /*
         * Debugging checks.
                 * added:
                 */
                if (hlock->read != 2 && hlock->check) {
-                       int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+                       int ret = check_prev_add(curr, hlock, next, distance,
+                                                &trace);
                        if (!ret)
                                return 0;
  
@@@ -2731,6 -2716,10 +2716,10 @@@ static inline int validate_chain(struc
  {
        return 1;
  }
+ static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+ {
+ }
  #endif
  
  /*
@@@ -2827,7 -2816,7 +2816,7 @@@ print_usage_bug(struct task_struct *cur
        print_lock(this);
  
        pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-       print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+       print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
  
        print_irqtrace_events(curr);
        pr_warn("\nother info that might help us debug this:\n");
@@@ -4907,9 -4896,8 +4896,9 @@@ void lockdep_unregister_key(struct lock
                return;
  
        raw_local_irq_save(flags);
 -      arch_spin_lock(&lockdep_lock);
 -      current->lockdep_recursion = 1;
 +      if (!graph_lock())
 +              goto out_irq;
 +
        pf = get_pending_free();
        hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
                if (k == key) {
        WARN_ON_ONCE(!found);
        __lockdep_free_key_range(pf, key, 1);
        call_rcu_zapped(pf);
 -      current->lockdep_recursion = 0;
 -      arch_spin_unlock(&lockdep_lock);
 +      graph_unlock();
 +out_irq:
        raw_local_irq_restore(flags);
  
        /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
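
In the lockdep hunks above, struct lock_trace no longer carries an entries
pointer at all: it records just a length and an offset into lockdep's shared
stack_trace[] array, and printing rebases from that offset. A condensed model
assembled from save_trace() and print_lock_trace() above (simplified, with an
illustrative array size and without the overflow handling of the real code):

    #include <linux/stacktrace.h>

    #define MODEL_MAX_ENTRIES 16384         /* illustrative; lockdep defines its own limit */

    struct lock_trace {
            unsigned int nr_entries;        /* how many words were saved      */
            unsigned int offset;            /* start index into stack_trace[] */
    };

    static unsigned long stack_trace[MODEL_MAX_ENTRIES];
    static unsigned int nr_stack_trace_entries;

    static void model_save_trace(struct lock_trace *trace)
    {
            unsigned long *entries = stack_trace + nr_stack_trace_entries;

            trace->offset = nr_stack_trace_entries;
            trace->nr_entries = stack_trace_save(entries,
                            MODEL_MAX_ENTRIES - nr_stack_trace_entries, 3);
            nr_stack_trace_entries += trace->nr_entries;
    }

    static void model_print_trace(struct lock_trace *trace, unsigned int spaces)
    {
            stack_trace_print(stack_trace + trace->offset,
                              trace->nr_entries, spaces);
    }
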
diff --combined kernel/trace/trace.c
index ca1ee656d6d852952670b0e63fd19f7499c679a6,0ce8515dd47045086bae7f1f5edaaae7645db83a..ec439999f38748090616406f77b93afc6f39b07a
@@@ -159,6 -159,8 +159,8 @@@ static union trace_eval_map_item *trace
  #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
  
  static int tracing_set_tracer(struct trace_array *tr, const char *buf);
+ static void ftrace_trace_userstack(struct ring_buffer *buffer,
+                                  unsigned long flags, int pc);
  
  #define MAX_TRACER_SIZE               100
  static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@@ -496,10 -498,8 +498,10 @@@ int trace_pid_write(struct trace_pid_li
         * not modified.
         */
        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
 -      if (!pid_list)
 +      if (!pid_list) {
 +              trace_parser_put(&parser);
                return -ENOMEM;
 +      }
  
        pid_list->pid_max = READ_ONCE(pid_max);
  
  
        pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
        if (!pid_list->pids) {
 +              trace_parser_put(&parser);
                kfree(pid_list);
                return -ENOMEM;
        }
@@@ -2752,12 -2751,21 +2754,21 @@@ trace_function(struct trace_array *tr
  
  #ifdef CONFIG_STACKTRACE
  
- #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+ /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
+ #define FTRACE_KSTACK_NESTING 4
+ #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
  struct ftrace_stack {
-       unsigned long           calls[FTRACE_STACK_MAX_ENTRIES];
+       unsigned long           calls[FTRACE_KSTACK_ENTRIES];
+ };
+ struct ftrace_stacks {
+       struct ftrace_stack     stacks[FTRACE_KSTACK_NESTING];
  };
  
- static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+ static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
  static DEFINE_PER_CPU(int, ftrace_stack_reserve);
  
  static void __ftrace_trace_stack(struct ring_buffer *buffer,
  {
        struct trace_event_call *call = &event_kernel_stack;
        struct ring_buffer_event *event;
+       unsigned int size, nr_entries;
+       struct ftrace_stack *fstack;
        struct stack_entry *entry;
-       struct stack_trace trace;
-       int use_stack;
-       int size = FTRACE_STACK_ENTRIES;
-       trace.nr_entries        = 0;
-       trace.skip              = skip;
+       int stackidx;
  
        /*
         * Add one, for this function and the call to save_stack_trace()
         */
  #ifndef CONFIG_UNWINDER_ORC
        if (!regs)
-               trace.skip++;
+               skip++;
  #endif
  
        /*
         */
        preempt_disable_notrace();
  
-       use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
+       stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
+       /* This should never happen. If it does, yell once and skip */
+       if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
+               goto out;
        /*
-        * We don't need any atomic variables, just a barrier.
-        * If an interrupt comes in, we don't care, because it would
-        * have exited and put the counter back to what we want.
-        * We just need a barrier to keep gcc from moving things
-        * around.
+        * The above __this_cpu_inc_return() is 'atomic' cpu local. An
+        * interrupt will either see the value pre increment or post
+        * increment. If the interrupt happens pre increment it will have
+        * restored the counter when it returns.  We just need a barrier to
+        * keep gcc from moving things around.
         */
        barrier();
-       if (use_stack == 1) {
-               trace.entries           = this_cpu_ptr(ftrace_stack.calls);
-               trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
  
-               if (regs)
-                       save_stack_trace_regs(regs, &trace);
-               else
-                       save_stack_trace(&trace);
-               if (trace.nr_entries > size)
-                       size = trace.nr_entries;
-       } else
-               /* From now on, use_stack is a boolean */
-               use_stack = 0;
+       fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
+       size = ARRAY_SIZE(fstack->calls);
  
-       size *= sizeof(unsigned long);
+       if (regs) {
+               nr_entries = stack_trace_save_regs(regs, fstack->calls,
+                                                  size, skip);
+       } else {
+               nr_entries = stack_trace_save(fstack->calls, size, skip);
+       }
  
+       size = nr_entries * sizeof(unsigned long);
        event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
                                            sizeof(*entry) + size, flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
  
-       memset(&entry->caller, 0, size);
-       if (use_stack)
-               memcpy(&entry->caller, trace.entries,
-                      trace.nr_entries * sizeof(unsigned long));
-       else {
-               trace.max_entries       = FTRACE_STACK_ENTRIES;
-               trace.entries           = entry->caller;
-               if (regs)
-                       save_stack_trace_regs(regs, &trace);
-               else
-                       save_stack_trace(&trace);
-       }
-       entry->size = trace.nr_entries;
+       memcpy(&entry->caller, fstack->calls, size);
+       entry->size = nr_entries;
  
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
@@@ -2907,15 -2899,15 +2902,15 @@@ void trace_dump_stack(int skip
  }
  EXPORT_SYMBOL_GPL(trace_dump_stack);
  
+ #ifdef CONFIG_USER_STACKTRACE_SUPPORT
  static DEFINE_PER_CPU(int, user_stack_count);
  
- void
+ static void
  ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
  {
        struct trace_event_call *call = &event_user_stack;
        struct ring_buffer_event *event;
        struct userstack_entry *entry;
-       struct stack_trace trace;
  
        if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
                return;
        entry->tgid             = current->tgid;
        memset(&entry->caller, 0, sizeof(entry->caller));
  
-       trace.nr_entries        = 0;
-       trace.max_entries       = FTRACE_STACK_ENTRIES;
-       trace.skip              = 0;
-       trace.entries           = entry->caller;
-       save_stack_trace_user(&trace);
+       stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
  
   out:
        preempt_enable();
  }
- #ifdef UNUSED
- static void __trace_userstack(struct trace_array *tr, unsigned long flags)
+ #else /* CONFIG_USER_STACKTRACE_SUPPORT */
+ static void ftrace_trace_userstack(struct ring_buffer *buffer,
+                                    unsigned long flags, int pc)
  {
-       ftrace_trace_userstack(tr, flags, preempt_count());
  }
- #endif /* UNUSED */
+ #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
  
  #endif /* CONFIG_STACKTRACE */
  
@@@ -7028,43 -7014,35 +7017,43 @@@ struct buffer_ref 
        struct ring_buffer      *buffer;
        void                    *page;
        int                     cpu;
 -      int                     ref;
 +      refcount_t              refcount;
  };
  
 +static void buffer_ref_release(struct buffer_ref *ref)
 +{
 +      if (!refcount_dec_and_test(&ref->refcount))
 +              return;
 +      ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
 +      kfree(ref);
 +}
 +
  static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
  {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
  
 -      if (--ref->ref)
 -              return;
 -
 -      ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
 -      kfree(ref);
 +      buffer_ref_release(ref);
        buf->private = 0;
  }
  
 -static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 +static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
  {
        struct buffer_ref *ref = (struct buffer_ref *)buf->private;
  
 -      ref->ref++;
 +      if (refcount_read(&ref->refcount) > INT_MAX/2)
 +              return false;
 +
 +      refcount_inc(&ref->refcount);
 +      return true;
  }
  
  /* Pipe buffer operations for a buffer. */
  static const struct pipe_buf_operations buffer_pipe_buf_ops = {
        .confirm                = generic_pipe_buf_confirm,
        .release                = buffer_pipe_buf_release,
 -      .steal                  = generic_pipe_buf_steal,
 +      .steal                  = generic_pipe_buf_nosteal,
        .get                    = buffer_pipe_buf_get,
  };
  
@@@ -7077,7 -7055,11 +7066,7 @@@ static void buffer_spd_release(struct s
        struct buffer_ref *ref =
                (struct buffer_ref *)spd->partial[i].private;
  
 -      if (--ref->ref)
 -              return;
 -
 -      ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
 -      kfree(ref);
 +      buffer_ref_release(ref);
        spd->partial[i].private = 0;
  }
  
@@@ -7132,7 -7114,7 +7121,7 @@@ tracing_buffers_splice_read(struct fil
                        break;
                }
  
 -              ref->ref = 1;
 +              refcount_set(&ref->refcount, 1);
                ref->buffer = iter->trace_buffer->buffer;
                ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
                if (IS_ERR(ref->page)) {
diff --combined mm/kasan/common.c
index 09c5864745116f91cc7c972232e3f9482333ef80,303a7379d2a35c77658a7cc49c7a99e59e4855aa..36afcf64e016fa7ef39e3c4404f6b3c89a60917f
@@@ -36,7 -36,6 +36,7 @@@
  #include <linux/types.h>
  #include <linux/vmalloc.h>
  #include <linux/bug.h>
 +#include <linux/uaccess.h>
  
  #include "kasan.h"
  #include "../slab.h"
@@@ -49,37 -48,28 +49,28 @@@ static inline int in_irqentry_text(unsi
                 ptr < (unsigned long)&__softirqentry_text_end);
  }
  
- static inline void filter_irq_stacks(struct stack_trace *trace)
+ static inline unsigned int filter_irq_stacks(unsigned long *entries,
+                                            unsigned int nr_entries)
  {
-       int i;
+       unsigned int i;
  
-       if (!trace->nr_entries)
-               return;
-       for (i = 0; i < trace->nr_entries; i++)
-               if (in_irqentry_text(trace->entries[i])) {
+       for (i = 0; i < nr_entries; i++) {
+               if (in_irqentry_text(entries[i])) {
                        /* Include the irqentry function into the stack. */
-                       trace->nr_entries = i + 1;
-                       break;
+                       return i + 1;
                }
+       }
+       return nr_entries;
  }
  
  static inline depot_stack_handle_t save_stack(gfp_t flags)
  {
        unsigned long entries[KASAN_STACK_DEPTH];
-       struct stack_trace trace = {
-               .nr_entries = 0,
-               .entries = entries,
-               .max_entries = KASAN_STACK_DEPTH,
-               .skip = 0
-       };
+       unsigned int nr_entries;
  
-       save_stack_trace(&trace);
-       filter_irq_stacks(&trace);
-       if (trace.nr_entries != 0 &&
-           trace.entries[trace.nr_entries-1] == ULONG_MAX)
-               trace.nr_entries--;
-       return depot_save_stack(&trace, flags);
+       nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+       nr_entries = filter_irq_stacks(entries, nr_entries);
+       return stack_depot_save(entries, nr_entries, flags);
  }
  
  static inline void set_track(struct kasan_track *track, gfp_t flags)
@@@ -615,15 -605,6 +606,15 @@@ void kasan_free_shadow(const struct vm_
                vfree(kasan_mem_to_shadow(vm->addr));
  }
  
 +extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
 +
 +void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
 +{
 +      unsigned long flags = user_access_save();
 +      __kasan_report(addr, size, is_write, ip);
 +      user_access_restore(flags);
 +}
 +
  #ifdef CONFIG_MEMORY_HOTPLUG
  static bool shadow_mapped(unsigned long addr)
  {
diff --combined mm/kasan/report.c
index 0772820ad09886ebf08b189a30499a6ba1c9d48a,882d77568e7ee9d5be006c872a316c2fdba19f8e..03a44357938675c84e43d3c5c936a272a7655fde
@@@ -100,10 -100,11 +100,11 @@@ static void print_track(struct kasan_tr
  {
        pr_err("%s by task %u:\n", prefix, track->pid);
        if (track->stack) {
-               struct stack_trace trace;
+               unsigned long *entries;
+               unsigned int nr_entries;
  
-               depot_fetch_stack(track->stack, &trace);
-               print_stack_trace(&trace, 0);
+               nr_entries = stack_depot_fetch(track->stack, &entries);
+               stack_trace_print(entries, nr_entries, 0);
        } else {
                pr_err("(stack is not available)\n");
        }
@@@ -281,7 -282,8 +282,7 @@@ void kasan_report_invalid_free(void *ob
        end_report(&flags);
  }
  
 -void kasan_report(unsigned long addr, size_t size,
 -              bool is_write, unsigned long ip)
 +void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
  {
        struct kasan_access_info info;
        void *tagged_addr;
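
The two KASAN hunks above pair up around the stack depot: the save path keeps
only the compact handle returned by stack_depot_save(), and the report path
fetches the entries back by reference and prints them. A minimal sketch of
that round trip, using the calls shown above (the array depth, gfp flags and
wrapper names are illustrative):

    #include <linux/kernel.h>
    #include <linux/stacktrace.h>
    #include <linux/stackdepot.h>

    /* Save the current stack and keep only a compact depot handle. */
    static depot_stack_handle_t example_record(gfp_t flags)
    {
            unsigned long entries[64];      /* KASAN sizes this with KASAN_STACK_DEPTH */
            unsigned int nr_entries;

            nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
            return stack_depot_save(entries, nr_entries, flags);
    }

    /* Fetch the stored entries back and print them in a report. */
    static void example_report(depot_stack_handle_t handle)
    {
            unsigned long *entries;
            unsigned int nr_entries;

            nr_entries = stack_depot_fetch(handle, &entries);
            stack_trace_print(entries, nr_entries, 0);
    }
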
diff --combined mm/kmemleak.c
index 2e435b8142e51ac9237110b451b750a6d4423fcc,d12b35de1e7ed68b83bafae9f59c63d67bac7c15..e57bf810f7983ac20663b92046250cb1f6bf1b53
@@@ -410,11 -410,6 +410,6 @@@ static void print_unreferenced(struct s
   */
  static void dump_object_info(struct kmemleak_object *object)
  {
-       struct stack_trace trace;
-       trace.nr_entries = object->trace_len;
-       trace.entries = object->trace;
        pr_notice("Object 0x%08lx (size %zu):\n",
                  object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
-       print_stack_trace(&trace, 4);
+       stack_trace_print(object->trace, object->trace_len, 4);
  }
  
  /*
@@@ -553,15 -548,7 +548,7 @@@ static struct kmemleak_object *find_and
   */
  static int __save_stack_trace(unsigned long *trace)
  {
-       struct stack_trace stack_trace;
-       stack_trace.max_entries = MAX_TRACE;
-       stack_trace.nr_entries = 0;
-       stack_trace.entries = trace;
-       stack_trace.skip = 2;
-       save_stack_trace(&stack_trace);
-       return stack_trace.nr_entries;
+       return stack_trace_save(trace, MAX_TRACE, 2);
  }
  
  /*
@@@ -1401,7 -1388,6 +1388,7 @@@ static void scan_block(void *_start, vo
  /*
   * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
   */
 +#ifdef CONFIG_SMP
  static void scan_large_block(void *start, void *end)
  {
        void *next;
                cond_resched();
        }
  }
 +#endif
  
  /*
   * Scan a memory block corresponding to a kmemleak_object. A condition is
@@@ -2021,13 -2006,8 +2008,8 @@@ early_param("kmemleak", kmemleak_boot_c
  
  static void __init print_log_trace(struct early_log *log)
  {
-       struct stack_trace trace;
-       trace.nr_entries = log->trace_len;
-       trace.entries = log->trace;
        pr_notice("Early log backtrace:\n");
-       print_stack_trace(&trace, 2);
+       stack_trace_print(log->trace, log->trace_len, 2);
  }
  
  /*