Git Repo - linux.git/commitdiff
Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
author Ingo Molnar <[email protected]>
Fri, 11 Nov 2011 07:19:37 +0000 (08:19 +0100)
committer Ingo Molnar <[email protected]>
Fri, 11 Nov 2011 07:19:37 +0000 (08:19 +0100)
kernel/jump_label.c
kernel/lockdep.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_irqsoff.c

diff --combined kernel/jump_label.c
index bbdfe2a462a088b210d5792c674b215274b1b39b,e6f1f24ad57787665d21cfaa125a078ddc9de4cd..66ff7109f6970ca63cb4aa9bf6b4800d69ef2f3a
@@@ -66,8 -66,9 +66,9 @@@ void jump_label_inc(struct jump_label_k
                return;
  
        jump_label_lock();
-       if (atomic_add_return(1, &key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0)
                jump_label_update(key, JUMP_LABEL_ENABLE);
+       atomic_inc(&key->enabled);
        jump_label_unlock();
  }
  
@@@ -104,18 -105,6 +105,18 @@@ static int __jump_label_text_reserved(s
        return 0;
  }
  
 +/* 
 + * Update code which is definitely not currently executing.
 + * Architectures which need heavyweight synchronization to modify
 + * running code can override this to make the non-live update case
 + * cheaper.
 + */
 +void __weak arch_jump_label_transform_static(struct jump_entry *entry,
 +                                          enum jump_label_type type)
 +{
 +      arch_jump_label_transform(entry, type); 
 +}
 +
  static void __jump_label_update(struct jump_label_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop, int enable)
        }
  }
  
 -/*
 - * Not all archs need this.
 - */
 -void __weak arch_jump_label_text_poke_early(jump_label_t addr)
 -{
 -}
 -
 -static __init int jump_label_init(void)
 +void __init jump_label_init(void)
  {
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        jump_label_sort_entries(iter_start, iter_stop);
  
        for (iter = iter_start; iter < iter_stop; iter++) {
 -              arch_jump_label_text_poke_early(iter->code);
 -              if (iter->key == (jump_label_t)(unsigned long)key)
 +              struct jump_label_key *iterk;
 +
 +              iterk = (struct jump_label_key *)(unsigned long)iter->key;
 +              arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
 +                                               JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
 +              if (iterk == key)
                        continue;
  
 -              key = (struct jump_label_key *)(unsigned long)iter->key;
 -              atomic_set(&key->enabled, 0);
 +              key = iterk;
                key->entries = iter;
  #ifdef CONFIG_MODULES
                key->next = NULL;
  #endif
        }
        jump_label_unlock();
 -
 -      return 0;
  }
 -early_initcall(jump_label_init);
  
  #ifdef CONFIG_MODULES
  
@@@ -217,7 -213,7 +218,7 @@@ void jump_label_apply_nops(struct modul
                return;
  
        for (iter = iter_start; iter < iter_stop; iter++)
 -              arch_jump_label_text_poke_early(iter->code);
 +              arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
  }
  
  static int jump_label_add_module(struct module *mod)
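The resolved jump_label_inc() above checks key->enabled before patching and only then bumps the count, all under jump_label_lock(), so the code sites are already patched by the time the count becomes non-zero. A minimal userspace model of that "patch before publish" ordering, using C11 atomics and a pthread mutex instead of the kernel primitives (toy_key, toy_patch and toy_inc are illustrative names, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct toy_key {
        atomic_int enabled;
};

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

static void toy_patch(struct toy_key *key, int enable)
{
        /* stands in for jump_label_update() / arch code patching */
        (void)key;
        printf("patching jump sites: %s\n", enable ? "enable" : "disable");
}

static void toy_inc(struct toy_key *key)
{
        pthread_mutex_lock(&toy_lock);
        if (atomic_load(&key->enabled) == 0)    /* first user: patch the sites... */
                toy_patch(key, 1);
        atomic_fetch_add(&key->enabled, 1);     /* ...then publish the count */
        pthread_mutex_unlock(&toy_lock);
}

int main(void)
{
        struct toy_key key = { .enabled = 0 };

        toy_inc(&key);  /* patches, enabled goes 0 -> 1 */
        toy_inc(&key);  /* already patched, enabled goes 1 -> 2 */
        return 0;
}

Build with: cc -pthread toy_jump_label.c (the file name is arbitrary).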
diff --combined kernel/lockdep.c
index e69434b070da3f922909ece9417627e11234dcd6,6bd915df5fd34871134f59c136e5fc7d8786bc48..d2fab46a1c94e33cedb094e556a708370349f898
@@@ -96,13 -96,8 +96,13 @@@ static int graph_lock(void
  
  static inline int graph_unlock(void)
  {
 -      if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
 +      if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
 +              /*
 +               * The lockdep graph lock isn't locked while we expect it to
 +               * be, we're confused now, bye!
 +               */
                return DEBUG_LOCKS_WARN_ON(1);
 +      }
  
        current->lockdep_recursion--;
        arch_spin_unlock(&lockdep_lock);
@@@ -139,9 -134,6 +139,9 @@@ static struct lock_class lock_classes[M
  static inline struct lock_class *hlock_class(struct held_lock *hlock)
  {
        if (!hlock->class_idx) {
 +              /*
 +               * Someone passed in garbage, we give up.
 +               */
                DEBUG_LOCKS_WARN_ON(1);
                return NULL;
        }
@@@ -498,36 -490,32 +498,32 @@@ void get_usage_chars(struct lock_class 
        usage[i] = '\0';
  }
  
- static int __print_lock_name(struct lock_class *class)
+ static void __print_lock_name(struct lock_class *class)
  {
        char str[KSYM_NAME_LEN];
        const char *name;
  
-       name = class->name;
-       if (!name)
-               name = __get_key_name(class->key, str);
-       return printk("%s", name);
- }
- static void print_lock_name(struct lock_class *class)
- {
-       char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
-       const char *name;
-       get_usage_chars(class, usage);
        name = class->name;
        if (!name) {
                name = __get_key_name(class->key, str);
-               printk(" (%s", name);
+               printk("%s", name);
        } else {
-               printk(" (%s", name);
+               printk("%s", name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
+ }
+ static void print_lock_name(struct lock_class *class)
+ {
+       char usage[LOCK_USAGE_CHARS];
+       get_usage_chars(class, usage);
+       printk(" (");
+       __print_lock_name(class);
        printk("){%s}", usage);
  }
  
@@@ -695,10 -683,6 +691,10 @@@ look_up_lock_class(struct lockdep_map *
         */
        list_for_each_entry(class, hash_head, hash_entry) {
                if (class->key == key) {
 +                      /*
 +                       * Huh! same key, different name? Did someone trample
 +                       * on some memory? We're most confused.
 +                       */
                        WARN_ON_ONCE(class->name != lock->name);
                        return class;
                }
@@@ -812,10 -796,6 +808,10 @@@ out_unlock_set
        else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
                lock->class_cache[subclass] = class;
  
 +      /*
 +       * Hash collision, did we smoke some? We found a class with a matching
 +       * hash but the subclass -- which is hashed in -- didn't match.
 +       */
        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
                return NULL;
  
@@@ -942,7 -922,7 +938,7 @@@ static inline void mark_lock_accessed(s
        unsigned long nr;
  
        nr = lock - list_entries;
 -      WARN_ON(nr >= nr_list_entries);
 +      WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
        lock->parent = parent;
        lock->class->dep_gen_id = lockdep_dependency_gen_id;
  }
@@@ -952,7 -932,7 +948,7 @@@ static inline unsigned long lock_access
        unsigned long nr;
  
        nr = lock - list_entries;
 -      WARN_ON(nr >= nr_list_entries);
 +      WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
        return lock->class->dep_gen_id == lockdep_dependency_gen_id;
  }
  
@@@ -1145,11 -1125,10 +1141,11 @@@ print_circular_bug_header(struct lock_l
        if (debug_locks_silent)
                return 0;
  
 -      printk("\n=======================================================\n");
 -      printk(  "[ INFO: possible circular locking dependency detected ]\n");
 +      printk("\n");
 +      printk("======================================================\n");
 +      printk("[ INFO: possible circular locking dependency detected ]\n");
        print_kernel_version();
 -      printk(  "-------------------------------------------------------\n");
 +      printk("-------------------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, task_pid_nr(curr));
        print_lock(check_src);
@@@ -1213,9 -1192,6 +1209,9 @@@ static noinline int print_bfs_bug(int r
        if (!debug_locks_off_graph_unlock())
                return 0;
  
 +      /*
 +       * Breadth-first-search failed, graph got corrupted?
 +       */
        WARN(1, "lockdep bfs error:%d\n", ret);
  
        return 0;
@@@ -1483,12 -1459,11 +1479,12 @@@ print_bad_irq_dependency(struct task_st
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;
  
 -      printk("\n======================================================\n");
 -      printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
 +      printk("\n");
 +      printk("======================================================\n");
 +      printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
                irqclass, irqclass);
        print_kernel_version();
 -      printk(  "------------------------------------------------------\n");
 +      printk("------------------------------------------------------\n");
        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
                curr->comm, task_pid_nr(curr),
                curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
@@@ -1713,11 -1688,10 +1709,11 @@@ print_deadlock_bug(struct task_struct *
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;
  
 -      printk("\n=============================================\n");
 -      printk(  "[ INFO: possible recursive locking detected ]\n");
 +      printk("\n");
 +      printk("=============================================\n");
 +      printk("[ INFO: possible recursive locking detected ]\n");
        print_kernel_version();
 -      printk(  "---------------------------------------------\n");
 +      printk("---------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, task_pid_nr(curr));
        print_lock(next);
@@@ -1966,11 -1940,6 +1962,11 @@@ out_bug
        if (!debug_locks_off_graph_unlock())
                return 0;
  
 +      /*
 +       * Clearly we all shouldn't be here, but since we made it we
 +       * can reliable say we messed up our state. See the above two
 +       * gotos for reasons why we could possibly end up here.
 +       */
        WARN_ON(1);
  
        return 0;
@@@ -2002,11 -1971,6 +1998,11 @@@ static inline int lookup_chain_cache(st
        struct held_lock *hlock_curr, *hlock_next;
        int i, j;
  
 +      /*
 +       * We might need to take the graph lock, ensure we've got IRQs
 +       * disabled to make this an IRQ-safe lock.. for recursion reasons
 +       * lockdep won't complain about its own locking errors.
 +       */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
        /*
@@@ -2158,10 -2122,6 +2154,10 @@@ static void check_chain_key(struct task
                hlock = curr->held_locks + i;
                if (chain_key != hlock->prev_chain_key) {
                        debug_locks_off();
 +                      /*
 +                       * We got mighty confused, our chain keys don't match
 +                       * with what we expect, someone trample on our task state?
 +                       */
                        WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
                                curr->lockdep_depth, i,
                                (unsigned long long)chain_key,
                        return;
                }
                id = hlock->class_idx - 1;
 +              /*
 +               * Whoops ran out of static storage again?
 +               */
                if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
                        return;
  
        }
        if (chain_key != curr->curr_chain_key) {
                debug_locks_off();
 +              /*
 +               * More smoking hash instead of calculating it, damn see these
 +               * numbers float.. I bet that a pink elephant stepped on my memory.
 +               */
                WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
                        curr->lockdep_depth, i,
                        (unsigned long long)chain_key,
@@@ -2220,11 -2173,10 +2216,11 @@@ print_usage_bug(struct task_struct *cur
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;
  
 -      printk("\n=================================\n");
 -      printk(  "[ INFO: inconsistent lock state ]\n");
 +      printk("\n");
 +      printk("=================================\n");
 +      printk("[ INFO: inconsistent lock state ]\n");
        print_kernel_version();
 -      printk(  "---------------------------------\n");
 +      printk("---------------------------------\n");
  
        printk("inconsistent {%s} -> {%s} usage.\n",
                usage_str[prev_bit], usage_str[new_bit]);
@@@ -2285,11 -2237,10 +2281,11 @@@ print_irq_inversion_bug(struct task_str
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;
  
 -      printk("\n=========================================================\n");
 -      printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
 +      printk("\n");
 +      printk("=========================================================\n");
 +      printk("[ INFO: possible irq lock inversion dependency detected ]\n");
        print_kernel_version();
 -      printk(  "---------------------------------------------------------\n");
 +      printk("---------------------------------------------------------\n");
        printk("%s/%d just changed the state of lock:\n",
                curr->comm, task_pid_nr(curr));
        print_lock(this);
@@@ -2570,24 -2521,12 +2566,24 @@@ void trace_hardirqs_on_caller(unsigned 
                return;
        }
  
 +      /*
 +       * We're enabling irqs and according to our state above irqs weren't
 +       * already enabled, yet we find the hardware thinks they are in fact
 +       * enabled.. someone messed up their IRQ state tracing.
 +       */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
  
 +      /*
 +       * See the fine text that goes along with this variable definition.
 +       */
        if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
                return;
  
 +      /*
 +       * Can't allow enabling interrupts while in an interrupt handler,
 +       * that's general bad form and such. Recursion, limited stack etc..
 +       */
        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                return;
  
@@@ -2615,10 -2554,6 +2611,10 @@@ void trace_hardirqs_off_caller(unsigne
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
  
 +      /*
 +       * So we're supposed to get called after you mask local IRQs, but for
 +       * some reason the hardware doesn't quite think you did a proper job.
 +       */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
  
@@@ -2651,10 -2586,6 +2647,10 @@@ void trace_softirqs_on(unsigned long ip
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
  
 +      /*
 +       * We fancy IRQs being disabled here, see softirq.c, avoids
 +       * funny state and nesting things.
 +       */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
  
@@@ -2691,9 -2622,6 +2687,9 @@@ void trace_softirqs_off(unsigned long i
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
  
 +      /*
 +       * We fancy IRQs being disabled here, see softirq.c
 +       */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
  
                curr->softirq_disable_ip = ip;
                curr->softirq_disable_event = ++curr->irq_events;
                debug_atomic_inc(softirqs_off_events);
 +              /*
 +               * Whoops, we wanted softirqs off, so why aren't they?
 +               */
                DEBUG_LOCKS_WARN_ON(!softirq_count());
        } else
                debug_atomic_inc(redundant_softirqs_off);
@@@ -2732,9 -2657,6 +2728,9 @@@ static void __lockdep_trace_alloc(gfp_
        if (!(gfp_mask & __GFP_FS))
                return;
  
 +      /*
 +       * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
 +       */
        if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
                return;
  
@@@ -2847,13 -2769,13 +2843,13 @@@ static int separate_irq_context(struct 
        return 0;
  }
  
 -#else
 +#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
  
  static inline
  int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
                enum lock_usage_bit new_bit)
  {
 -      WARN_ON(1);
 +      WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
        return 1;
  }
  
@@@ -2873,7 -2795,7 +2869,7 @@@ void lockdep_trace_alloc(gfp_t gfp_mask
  {
  }
  
 -#endif
 +#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
  
  /*
   * Mark a lock with a usage bit, and validate the state transition:
@@@ -2954,9 -2876,6 +2950,9 @@@ void lockdep_init_map(struct lockdep_ma
        lock->cpu = raw_smp_processor_id();
  #endif
  
 +      /*
 +       * Can't be having no nameless bastards around this place!
 +       */
        if (DEBUG_LOCKS_WARN_ON(!name)) {
                lock->name = "NULL";
                return;
  
        lock->name = name;
  
 +      /*
 +       * No key, no joy, we need to hash something.
 +       */
        if (DEBUG_LOCKS_WARN_ON(!key))
                return;
        /*
         */
        if (!static_obj(key)) {
                printk("BUG: key %p not in .data!\n", key);
 +              /*
 +               * What it says above ^^^^^, I suggest you read it.
 +               */
                DEBUG_LOCKS_WARN_ON(1);
                return;
        }
@@@ -3015,11 -2928,6 +3011,11 @@@ static int __lock_acquire(struct lockde
        if (unlikely(!debug_locks))
                return 0;
  
 +      /*
 +       * Lockdep should run with IRQs disabled, otherwise we could
 +       * get an interrupt which would want to take locks, which would
 +       * end up in lockdep and have you got a head-ache already?
 +       */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
  
         * dependency checks are done)
         */
        depth = curr->lockdep_depth;
 +      /*
 +       * Ran out of static storage for our per-task lock stack again have we?
 +       */
        if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
                return 0;
  
        }
  
        hlock = curr->held_locks + depth;
 +      /*
 +       * Plain impossible, we just registered it and checked it weren't no
 +       * NULL like.. I bet this mushroom I ate was good!
 +       */
        if (DEBUG_LOCKS_WARN_ON(!class))
                return 0;
        hlock->class_idx = class_idx;
         * the hash, not class->key.
         */
        id = class - lock_classes;
 +      /*
 +       * Whoops, we did it again.. ran straight out of our static allocation.
 +       */
        if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
                return 0;
  
        chain_key = curr->curr_chain_key;
        if (!depth) {
 +              /*
 +               * How can we have a chain hash when we ain't got no keys?!
 +               */
                if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
                        return 0;
                chain_head = 1;
@@@ -3166,10 -3061,9 +3162,10 @@@ print_unlock_inbalance_bug(struct task_
        if (debug_locks_silent)
                return 0;
  
 -      printk("\n=====================================\n");
 -      printk(  "[ BUG: bad unlock balance detected! ]\n");
 -      printk(  "-------------------------------------\n");
 +      printk("\n");
 +      printk("=====================================\n");
 +      printk("[ BUG: bad unlock balance detected! ]\n");
 +      printk("-------------------------------------\n");
        printk("%s/%d is trying to release lock (",
                curr->comm, task_pid_nr(curr));
        print_lockdep_cache(lock);
@@@ -3193,9 -3087,6 +3189,9 @@@ static int check_unlock(struct task_str
  {
        if (unlikely(!debug_locks))
                return 0;
 +      /*
 +       * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
 +       */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
  
@@@ -3225,11 -3116,6 +3221,11 @@@ static int match_held_lock(struct held_
                if (!class)
                        return 0;
  
 +              /*
 +               * References, but not a lock we're actually ref-counting?
 +               * State got messed up, follow the sites that change ->references
 +               * and try to make sense of it.
 +               */
                if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
                        return 0;
  
@@@ -3252,10 -3138,6 +3248,10 @@@ __lock_set_class(struct lockdep_map *lo
        int i;
  
        depth = curr->lockdep_depth;
 +      /*
 +       * This function is about (re)setting the class of a held lock,
 +       * yet we're not actually holding any locks. Naughty user!
 +       */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return 0;
  
@@@ -3291,10 -3173,6 +3287,10 @@@ found_it
                        return 0;
        }
  
 +      /*
 +       * I took it apart and put it back together again, except now I have
 +       * these 'spare' parts.. where shall I put them.
 +       */
        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
                return 0;
        return 1;
@@@ -3319,10 -3197,6 +3315,10 @@@ lock_release_non_nested(struct task_str
         * of held locks:
         */
        depth = curr->lockdep_depth;
 +      /*
 +       * So we're all set to release this lock.. wait what lock? We don't
 +       * own any locks, you've been drinking again?
 +       */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return 0;
  
@@@ -3375,10 -3249,6 +3371,10 @@@ found_it
                        return 0;
        }
  
 +      /*
 +       * We had N bottles of beer on the wall, we drank one, but now
 +       * there's not N-1 bottles of beer left on the wall...
 +       */
        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
                return 0;
        return 1;
@@@ -3409,9 -3279,6 +3405,9 @@@ static int lock_release_nested(struct t
                return lock_release_non_nested(curr, lock, ip);
        curr->lockdep_depth--;
  
 +      /*
 +       * No more locks, but somehow we've got hash left over, who left it?
 +       */
        if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
                return 0;
  
@@@ -3494,13 -3361,10 +3490,13 @@@ static void check_flags(unsigned long f
         * check if not in hardirq contexts:
         */
        if (!hardirq_count()) {
 -              if (softirq_count())
 +              if (softirq_count()) {
 +                      /* like the above, but with softirqs */
                        DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
 -              else
 +              } else {
 +                      /* lick the above, does it taste good? */
                        DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
 +              }
        }
  
        if (!debug_locks)
@@@ -3610,10 -3474,9 +3606,10 @@@ print_lock_contention_bug(struct task_s
        if (debug_locks_silent)
                return 0;
  
 -      printk("\n=================================\n");
 -      printk(  "[ BUG: bad contention detected! ]\n");
 -      printk(  "---------------------------------\n");
 +      printk("\n");
 +      printk("=================================\n");
 +      printk("[ BUG: bad contention detected! ]\n");
 +      printk("---------------------------------\n");
        printk("%s/%d is trying to contend lock (",
                curr->comm, task_pid_nr(curr));
        print_lockdep_cache(lock);
@@@ -3639,10 -3502,6 +3635,10 @@@ __lock_contended(struct lockdep_map *lo
        int i, contention_point, contending_point;
  
        depth = curr->lockdep_depth;
 +      /*
 +       * Whee, we contended on this lock, except it seems we're not
 +       * actually trying to acquire anything much at all..
 +       */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return;
  
@@@ -3692,10 -3551,6 +3688,10 @@@ __lock_acquired(struct lockdep_map *loc
        int i, cpu;
  
        depth = curr->lockdep_depth;
 +      /*
 +       * Yay, we acquired ownership of this lock we didn't try to
 +       * acquire, how the heck did that happen?
 +       */
        if (DEBUG_LOCKS_WARN_ON(!depth))
                return;
  
@@@ -3900,12 -3755,8 +3896,12 @@@ void lockdep_reset_lock(struct lockdep_
                                match |= class == lock->class_cache[j];
  
                        if (unlikely(match)) {
 -                              if (debug_locks_off_graph_unlock())
 +                              if (debug_locks_off_graph_unlock()) {
 +                                      /*
 +                                       * We all just reset everything, how did it match?
 +                                       */
                                        WARN_ON(1);
 +                              }
                                goto out_restore;
                        }
                }
@@@ -3984,10 -3835,9 +3980,10 @@@ print_freed_lock_bug(struct task_struc
        if (debug_locks_silent)
                return;
  
 -      printk("\n=========================\n");
 -      printk(  "[ BUG: held lock freed! ]\n");
 -      printk(  "-------------------------\n");
 +      printk("\n");
 +      printk("=========================\n");
 +      printk("[ BUG: held lock freed! ]\n");
 +      printk("-------------------------\n");
        printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
                curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
        print_lock(hlock);
@@@ -4041,10 -3891,9 +4037,10 @@@ static void print_held_locks_bug(struc
        if (debug_locks_silent)
                return;
  
 -      printk("\n=====================================\n");
 -      printk(  "[ BUG: lock held at task exit time! ]\n");
 -      printk(  "-------------------------------------\n");
 +      printk("\n");
 +      printk("=====================================\n");
 +      printk("[ BUG: lock held at task exit time! ]\n");
 +      printk("-------------------------------------\n");
        printk("%s/%d is exiting with locks still held!\n",
                curr->comm, task_pid_nr(curr));
        lockdep_print_held_locks(curr);
@@@ -4138,17 -3987,16 +4134,17 @@@ void lockdep_sys_exit(void
        if (unlikely(curr->lockdep_depth)) {
                if (!debug_locks_off())
                        return;
 -              printk("\n================================================\n");
 -              printk(  "[ BUG: lock held when returning to user space! ]\n");
 -              printk(  "------------------------------------------------\n");
 +              printk("\n");
 +              printk("================================================\n");
 +              printk("[ BUG: lock held when returning to user space! ]\n");
 +              printk("------------------------------------------------\n");
                printk("%s/%d is leaving the kernel with locks still held!\n",
                                curr->comm, curr->pid);
                lockdep_print_held_locks(curr);
        }
  }
  
 -void lockdep_rcu_dereference(const char *file, const int line)
 +void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
  {
        struct task_struct *curr = current;
  
                return;
  #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
        /* Note: the following can be executed concurrently, so be careful. */
 -      printk("\n===================================================\n");
 -      printk(  "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
 -      printk(  "---------------------------------------------------\n");
 -      printk("%s:%d invoked rcu_dereference_check() without protection!\n",
 -                      file, line);
 +      printk("\n");
 +      printk("===============================\n");
 +      printk("[ INFO: suspicious RCU usage. ]\n");
 +      printk("-------------------------------\n");
 +      printk("%s:%d %s!\n", file, line, s);
        printk("\nother info that might help us debug this:\n\n");
        printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
        lockdep_print_held_locks(curr);
        printk("\nstack backtrace:\n");
        dump_stack();
  }
 -EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
 +EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
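Among the lockdep.c changes above, print_lock_name() is split so that a new __print_lock_name() emits only the bare class name (with version and subclass) while the caller adds the surrounding " (...){usage}" decoration, and the multi-line report banners are broken into one printk() per line. A userspace sketch of the name-printing split, with printf standing in for printk and a placeholder where the kernel would fall back to __get_key_name():

#include <stdio.h>

struct lock_class {
        const char *name;
        int name_version;
        int subclass;
};

/* print only the class name, plus #version and /subclass when present */
static void __print_lock_name(const struct lock_class *class)
{
        if (!class->name) {
                printf("<key-derived name>");   /* placeholder for __get_key_name() */
                return;
        }
        printf("%s", class->name);
        if (class->name_version > 1)
                printf("#%d", class->name_version);
        if (class->subclass)
                printf("/%d", class->subclass);
}

/* the wrapper adds the " (" ... "){usage}" decoration around the bare name */
static void print_lock_name(const struct lock_class *class, const char *usage)
{
        printf(" (");
        __print_lock_name(class);
        printf("){%s}", usage);
}

int main(void)
{
        struct lock_class c = { .name = "&rq->lock", .name_version = 2, .subclass = 0 };

        print_lock_name(&c, "+.+.");
        printf("\n");
        return 0;
}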
diff --combined kernel/trace/ftrace.c
index 900b409543db10cfc46b9da703f463a1cee9b78e,7caa4505508da64dcf3c8836808aa5bb8c0f5576..b1e8943fed1d3a9fd61916527c59c70d57af7d2c
@@@ -22,7 -22,6 +22,7 @@@
  #include <linux/hardirq.h>
  #include <linux/kthread.h>
  #include <linux/uaccess.h>
 +#include <linux/module.h>
  #include <linux/ftrace.h>
  #include <linux/sysctl.h>
  #include <linux/slab.h>
@@@ -152,7 -151,6 +152,6 @@@ void clear_ftrace_function(void
        ftrace_pid_function = ftrace_stub;
  }
  
- #undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
  #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
  /*
   * For those archs that do not test ftrace_trace_stop in their
@@@ -1212,7 -1210,9 +1211,9 @@@ ftrace_hash_move(struct ftrace_ops *ops
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
-               return 0;
+               /* still need to update the function records */
+               ret = 0;
+               goto out;
        }
  
        /*
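The ftrace_hash_move() hunk above replaces an early return with "ret = 0; goto out;" so the function records are still updated even when the new filter hash ends up empty. A compilable toy version of that single-exit pattern (the names hash_move and update_function_records are illustrative, not the ftrace internals):

#include <stdio.h>

/* stands in for the record-update work that must run on every exit path */
static void update_function_records(void)
{
        printf("function records updated\n");
}

static int hash_move(int src_count)
{
        int ret = -1;

        if (!src_count) {
                /* still need to update the function records */
                ret = 0;
                goto out;
        }

        /* ...moving the hash entries would happen here... */
        ret = 0;
 out:
        update_function_records();
        return ret;
}

int main(void)
{
        return hash_move(0);    /* empty source hash still updates records */
}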
diff --combined kernel/trace/trace.c
index f2bd275bb60f5c63630ee4483c02a0f33d53e46d,b296186eb93a035aad053359e1c5469c1a980957..9e158cc84ced3878a68a06823f170f16dd1ec663
@@@ -341,7 -341,7 +341,7 @@@ unsigned long trace_flags = TRACE_ITER_
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE;
  
  static int trace_stop_count;
 -static DEFINE_SPINLOCK(tracing_start_lock);
 +static DEFINE_RAW_SPINLOCK(tracing_start_lock);
  
  static void wakeup_work_handler(struct work_struct *work)
  {
@@@ -961,7 -961,7 +961,7 @@@ void tracing_start(void
        if (tracing_disabled)
                return;
  
 -      spin_lock_irqsave(&tracing_start_lock, flags);
 +      raw_spin_lock_irqsave(&tracing_start_lock, flags);
        if (--trace_stop_count) {
                if (trace_stop_count < 0) {
                        /* Someone screwed up their debugging */
  
        ftrace_start();
   out:
 -      spin_unlock_irqrestore(&tracing_start_lock, flags);
 +      raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
  }
  
  /**
@@@ -1001,7 -1001,7 +1001,7 @@@ void tracing_stop(void
        unsigned long flags;
  
        ftrace_stop();
 -      spin_lock_irqsave(&tracing_start_lock, flags);
 +      raw_spin_lock_irqsave(&tracing_start_lock, flags);
        if (trace_stop_count++)
                goto out;
  
        arch_spin_unlock(&ftrace_max_lock);
  
   out:
 -      spin_unlock_irqrestore(&tracing_start_lock, flags);
 +      raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
  }
  
  void trace_stop_cmdline_recording(void);
@@@ -2140,6 -2140,21 +2140,21 @@@ enum print_line_t print_trace_line(stru
        return print_trace_fmt(iter);
  }
  
+ void trace_latency_header(struct seq_file *m)
+ {
+       struct trace_iterator *iter = m->private;
+       /* print nothing if the buffers are empty */
+       if (trace_empty(iter))
+               return;
+       if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+               print_trace_header(m, iter);
+       if (!(trace_flags & TRACE_ITER_VERBOSE))
+               print_lat_help_header(m);
+ }
  void trace_default_header(struct seq_file *m)
  {
        struct trace_iterator *iter = m->private;
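The new trace_latency_header() above gives latency tracers a way to emit just the latency-format headers: nothing when the buffers are empty, the full header only in latency format, and the short help header unless verbose output is enabled. A small userspace model of that decision order (struct and flag names prefixed "toy"/"TOY_" are placeholders, not the trace API):

#include <stdbool.h>
#include <stdio.h>

#define TOY_FILE_LAT_FMT (1u << 0)      /* stands in for TRACE_FILE_LAT_FMT */
#define TOY_ITER_VERBOSE (1u << 1)      /* stands in for TRACE_ITER_VERBOSE */

struct toy_iter {
        bool empty;
        unsigned int iter_flags;
};

static void latency_header(const struct toy_iter *iter, unsigned int trace_flags)
{
        /* print nothing if the buffers are empty */
        if (iter->empty)
                return;
        if (iter->iter_flags & TOY_FILE_LAT_FMT)
                printf("# full latency trace header\n");
        if (!(trace_flags & TOY_ITER_VERBOSE))
                printf("# short latency help header\n");
}

int main(void)
{
        struct toy_iter it = { .empty = false, .iter_flags = TOY_FILE_LAT_FMT };

        latency_header(&it, 0);
        return 0;
}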
diff --combined kernel/trace/trace_irqsoff.c
index 20dad0d7a1633c551eec45e7f708dca4ac48b38c,a248c686b2b8e39dc94b2be3ff250c91c57b9e88..99d20e9203686420e74af5c2eca6387deb375028
@@@ -23,7 -23,7 +23,7 @@@ static int                            tracer_enabled __read_mos
  
  static DEFINE_PER_CPU(int, tracing_cpu);
  
 -static DEFINE_SPINLOCK(max_trace_lock);
 +static DEFINE_RAW_SPINLOCK(max_trace_lock);
  
  enum {
        TRACER_IRQS_OFF         = (1 << 1),
@@@ -280,9 -280,20 +280,20 @@@ static enum print_line_t irqsoff_print_
  }
  
  static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
- static void irqsoff_print_header(struct seq_file *s) { }
  static void irqsoff_trace_open(struct trace_iterator *iter) { }
  static void irqsoff_trace_close(struct trace_iterator *iter) { }
+ #ifdef CONFIG_FUNCTION_TRACER
+ static void irqsoff_print_header(struct seq_file *s)
+ {
+       trace_default_header(s);
+ }
+ #else
+ static void irqsoff_print_header(struct seq_file *s)
+ {
+       trace_latency_header(s);
+ }
+ #endif /* CONFIG_FUNCTION_TRACER */
  #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  
  /*
@@@ -321,7 -332,7 +332,7 @@@ check_critical_timing(struct trace_arra
        if (!report_latency(delta))
                goto out;
  
 -      spin_lock_irqsave(&max_trace_lock, flags);
 +      raw_spin_lock_irqsave(&max_trace_lock, flags);
  
        /* check if we are still the max latency */
        if (!report_latency(delta))
        max_sequence++;
  
  out_unlock:
 -      spin_unlock_irqrestore(&max_trace_lock, flags);
 +      raw_spin_unlock_irqrestore(&max_trace_lock, flags);
  
  out:
        data->critical_sequence = max_sequence;
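The trace_irqsoff.c changes above replace the empty irqsoff_print_header() stub with a compile-time choice: when CONFIG_FUNCTION_TRACER is built in, the default header is used, otherwise the new latency-only header from trace_latency_header(). A standalone illustration of that #ifdef selection (TOY_FUNCTION_TRACER is a made-up macro standing in for the config option):

#include <stdio.h>

void default_header(void) { printf("default (function tracer) header\n"); }
void latency_header(void) { printf("latency-only header\n"); }

/* define TOY_FUNCTION_TRACER at build time (-DTOY_FUNCTION_TRACER) to flip the choice */
#ifdef TOY_FUNCTION_TRACER
static void irqsoff_header(void)
{
        default_header();
}
#else
static void irqsoff_header(void)
{
        latency_header();
}
#endif

int main(void)
{
        irqsoff_header();
        return 0;
}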