1 /*
2  * kernel/lockdep.c
3  *
4  * Runtime locking correctness validator
5  *
6  * Started by Ingo Molnar:
7  *
8  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
9  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
10  *
11  * this code maps all the lock dependencies as they occur in a live kernel
12  * and will warn about the following classes of locking bugs:
13  *
14  * - lock inversion scenarios
15  * - circular lock dependencies
16  * - hardirq/softirq safe/unsafe locking bugs
17  *
18  * Bugs are reported even if the current locking scenario does not cause
19  * any deadlock at this point.
20  *
21  * I.e. if at any time in the past two locks were taken in a different order,
22  * even if it happened for another task, even if those were different
23  * locks (but of the same class as this lock), this code will detect it.
24  *
25  * Thanks to Arjan van de Ven for coming up with the initial idea of
26  * mapping lock dependencies at runtime.
27  */
28 #define DISABLE_BRANCH_PROFILING
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/sched/clock.h>
32 #include <linux/sched/task.h>
33 #include <linux/sched/mm.h>
34 #include <linux/delay.h>
35 #include <linux/module.h>
36 #include <linux/proc_fs.h>
37 #include <linux/seq_file.h>
38 #include <linux/spinlock.h>
39 #include <linux/kallsyms.h>
40 #include <linux/interrupt.h>
41 #include <linux/stacktrace.h>
42 #include <linux/debug_locks.h>
43 #include <linux/irqflags.h>
44 #include <linux/utsname.h>
45 #include <linux/hash.h>
46 #include <linux/ftrace.h>
47 #include <linux/stringify.h>
48 #include <linux/bitops.h>
49 #include <linux/gfp.h>
50 #include <linux/random.h>
51 #include <linux/jhash.h>
52 #include <linux/nmi.h>
53 #include <linux/rcupdate.h>
54
55 #include <asm/sections.h>
56
57 #include "lockdep_internals.h"
58
59 #define CREATE_TRACE_POINTS
60 #include <trace/events/lock.h>
61
62 #ifdef CONFIG_PROVE_LOCKING
63 int prove_locking = 1;
64 module_param(prove_locking, int, 0644);
65 #else
66 #define prove_locking 0
67 #endif
68
69 #ifdef CONFIG_LOCK_STAT
70 int lock_stat = 1;
71 module_param(lock_stat, int, 0644);
72 #else
73 #define lock_stat 0
74 #endif
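
/*
 * For illustration: since these are module_param()s in built-in code, they
 * should be settable on the kernel command line (e.g.
 * "lockdep.prove_locking=0") and, given the 0644 permissions, visible at
 * runtime under /sys/module/lockdep/parameters/ (assuming the usual
 * naming for built-in module parameters).
 */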
75
76 /*
77  * lockdep_lock: protects the lockdep graph, the hashes and the
78  *               class/list/hash allocators.
79  *
80  * This is one of the rare exceptions where it's justified
81  * to use a raw spinlock - we really don't want the spinlock
82  * code to recurse back into the lockdep code...
83  */
84 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
85 static struct task_struct *lockdep_selftest_task_struct;
86
87 static int graph_lock(void)
88 {
89         arch_spin_lock(&lockdep_lock);
90         /*
91          * Make sure that if another CPU detected a bug while
92          * walking the graph we don't change it (while the other
93          * CPU is busy printing out stuff with the graph lock
94          * dropped already)
95          */
96         if (!debug_locks) {
97                 arch_spin_unlock(&lockdep_lock);
98                 return 0;
99         }
100         /* prevent any recursions within lockdep from causing deadlocks */
101         current->lockdep_recursion++;
102         return 1;
103 }
104
105 static inline int graph_unlock(void)
106 {
107         if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
108                 /*
109          * The lockdep graph lock isn't held even though we expect it to
110          * be - we're confused now, bye!
111                  */
112                 return DEBUG_LOCKS_WARN_ON(1);
113         }
114
115         current->lockdep_recursion--;
116         arch_spin_unlock(&lockdep_lock);
117         return 0;
118 }
119
120 /*
121  * Turn lock debugging off and return with 0 if it was off already,
122  * and also release the graph lock:
123  */
124 static inline int debug_locks_off_graph_unlock(void)
125 {
126         int ret = debug_locks_off();
127
128         arch_spin_unlock(&lockdep_lock);
129
130         return ret;
131 }
132
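#if 0
/*
 * A minimal sketch of the typical calling pattern (illustrative only; it
 * mirrors how the graph-lock users later in this file operate, and the
 * function name is made up): the graph lock is taken with interrupts
 * disabled, and a zero return from graph_lock() means lockdep has already
 * been turned off, so the caller simply backs out.
 */
static void example_graph_lock_user(void)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        if (!graph_lock())
                goto out;

        /* ... inspect or modify the dependency graph here ... */

        graph_unlock();
out:
        raw_local_irq_restore(flags);
}
#endif
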
133 unsigned long nr_list_entries;
134 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
135
136 /*
137  * All data structures here are protected by the graph lock (lockdep_lock).
138  *
139  * nr_lock_classes is the number of elements of lock_classes[] that are
140  * in use.
141  */
142 unsigned long nr_lock_classes;
143 #ifndef CONFIG_DEBUG_LOCKDEP
144 static
145 #endif
146 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
147
148 static inline struct lock_class *hlock_class(struct held_lock *hlock)
149 {
150         if (!hlock->class_idx) {
151                 /*
152                  * Someone passed in garbage, we give up.
153                  */
154                 DEBUG_LOCKS_WARN_ON(1);
155                 return NULL;
156         }
157         return lock_classes + hlock->class_idx - 1;
158 }
159
160 #ifdef CONFIG_LOCK_STAT
161 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
162
163 static inline u64 lockstat_clock(void)
164 {
165         return local_clock();
166 }
167
168 static int lock_point(unsigned long points[], unsigned long ip)
169 {
170         int i;
171
172         for (i = 0; i < LOCKSTAT_POINTS; i++) {
173                 if (points[i] == 0) {
174                         points[i] = ip;
175                         break;
176                 }
177                 if (points[i] == ip)
178                         break;
179         }
180
181         return i;
182 }
183
184 static void lock_time_inc(struct lock_time *lt, u64 time)
185 {
186         if (time > lt->max)
187                 lt->max = time;
188
189         if (time < lt->min || !lt->nr)
190                 lt->min = time;
191
192         lt->total += time;
193         lt->nr++;
194 }
195
196 static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
197 {
198         if (!src->nr)
199                 return;
200
201         if (src->max > dst->max)
202                 dst->max = src->max;
203
204         if (src->min < dst->min || !dst->nr)
205                 dst->min = src->min;
206
207         dst->total += src->total;
208         dst->nr += src->nr;
209 }
210
211 struct lock_class_stats lock_stats(struct lock_class *class)
212 {
213         struct lock_class_stats stats;
214         int cpu, i;
215
216         memset(&stats, 0, sizeof(struct lock_class_stats));
217         for_each_possible_cpu(cpu) {
218                 struct lock_class_stats *pcs =
219                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
220
221                 for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
222                         stats.contention_point[i] += pcs->contention_point[i];
223
224                 for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
225                         stats.contending_point[i] += pcs->contending_point[i];
226
227                 lock_time_add(&pcs->read_waittime, &stats.read_waittime);
228                 lock_time_add(&pcs->write_waittime, &stats.write_waittime);
229
230                 lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
231                 lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
232
233                 for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
234                         stats.bounces[i] += pcs->bounces[i];
235         }
236
237         return stats;
238 }
239
240 void clear_lock_stats(struct lock_class *class)
241 {
242         int cpu;
243
244         for_each_possible_cpu(cpu) {
245                 struct lock_class_stats *cpu_stats =
246                         &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
247
248                 memset(cpu_stats, 0, sizeof(struct lock_class_stats));
249         }
250         memset(class->contention_point, 0, sizeof(class->contention_point));
251         memset(class->contending_point, 0, sizeof(class->contending_point));
252 }
253
254 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
255 {
256         return &this_cpu_ptr(cpu_lock_stats)[class - lock_classes];
257 }
258
259 static void lock_release_holdtime(struct held_lock *hlock)
260 {
261         struct lock_class_stats *stats;
262         u64 holdtime;
263
264         if (!lock_stat)
265                 return;
266
267         holdtime = lockstat_clock() - hlock->holdtime_stamp;
268
269         stats = get_lock_stats(hlock_class(hlock));
270         if (hlock->read)
271                 lock_time_inc(&stats->read_holdtime, holdtime);
272         else
273                 lock_time_inc(&stats->write_holdtime, holdtime);
274 }
275 #else
276 static inline void lock_release_holdtime(struct held_lock *hlock)
277 {
278 }
279 #endif
280
281 /*
282  * We keep a global list of all lock classes. The list is only accessed with
283  * the lockdep spinlock held. free_lock_classes is a list with free
284  * elements. These elements are linked together by the lock_entry member in
285  * struct lock_class.
286  */
287 LIST_HEAD(all_lock_classes);
288 static LIST_HEAD(free_lock_classes);
289
290 /**
291  * struct pending_free - information about data structures about to be freed
292  * @zapped: Head of a list with struct lock_class elements.
293  */
294 struct pending_free {
295         struct list_head zapped;
296 };
297
298 /**
299  * struct delayed_free - data structures used for delayed freeing
300  *
301  * A data structure for delayed freeing of data structures that may be
302  * accessed by RCU readers at the time these were freed.
303  *
304  * @rcu_head:  Used to schedule an RCU callback for freeing data structures.
305  * @index:     Index of @pf to which freed data structures are added.
306  * @scheduled: Whether or not an RCU callback has been scheduled.
307  * @pf:        Array with information about data structures about to be freed.
308  */
309 static struct delayed_free {
310         struct rcu_head         rcu_head;
311         int                     index;
312         int                     scheduled;
313         struct pending_free     pf[2];
314 } delayed_free;
315
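#if 0
/*
 * A sketch of the double-buffering described above - an assumption based
 * on the kernel-doc, not a copy of the real code; "some_class" and the
 * callback name are placeholders: zapped classes are queued on the element
 * selected by @index while the other element may still be waiting for its
 * RCU grace period, and once the callback runs the roles swap.
 */
        struct pending_free *pf = &delayed_free.pf[delayed_free.index];

        list_move_tail(&some_class->lock_entry, &pf->zapped);
        if (!delayed_free.scheduled) {
                delayed_free.scheduled = true;
                call_rcu(&delayed_free.rcu_head, free_zapped_classes_cb);
        }
#endif
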
316 /*
317  * The lockdep classes are in a hash-table as well, for fast lookup:
318  */
319 #define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
320 #define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
321 #define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
322 #define classhashentry(key)     (classhash_table + __classhashfn((key)))
323
324 static struct hlist_head classhash_table[CLASSHASH_SIZE];
325
326 /*
327  * We put the lock dependency chains into a hash-table as well, to cache
328  * their existence:
329  */
330 #define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
331 #define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
332 #define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
333 #define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))
334
335 static struct hlist_head chainhash_table[CHAINHASH_SIZE];
336
337 /*
338  * The hash key of the lock dependency chains is a hash itself too:
339  * it's a hash of all locks taken up to that lock, including that lock.
340  * It's a 64-bit hash, because it's important for the keys to be
341  * unique.
342  */
343 static inline u64 iterate_chain_key(u64 key, u32 idx)
344 {
345         u32 k0 = key, k1 = key >> 32;
346
347         __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
348
349         return k0 | (u64)k1 << 32;
350 }
351
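#if 0
/*
 * A sketch of how the chain key is built by its callers further down in
 * this file ("curr" stands for the task, as in those callers): each held
 * lock's class index is folded into the running 64-bit key, so the same
 * classes acquired in a different order yield a different chain key.
 */
        u64 chain_key = 0;
        int i;

        for (i = 0; i < curr->lockdep_depth; i++)
                chain_key = iterate_chain_key(chain_key, curr->held_locks[i].class_idx);
#endif
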
352 void lockdep_off(void)
353 {
354         current->lockdep_recursion++;
355 }
356 EXPORT_SYMBOL(lockdep_off);
357
358 void lockdep_on(void)
359 {
360         current->lockdep_recursion--;
361 }
362 EXPORT_SYMBOL(lockdep_on);
363
364 void lockdep_set_selftest_task(struct task_struct *task)
365 {
366         lockdep_selftest_task_struct = task;
367 }
368
369 /*
370  * Debugging switches:
371  */
372
373 #define VERBOSE                 0
374 #define VERY_VERBOSE            0
375
376 #if VERBOSE
377 # define HARDIRQ_VERBOSE        1
378 # define SOFTIRQ_VERBOSE        1
379 #else
380 # define HARDIRQ_VERBOSE        0
381 # define SOFTIRQ_VERBOSE        0
382 #endif
383
384 #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
385 /*
386  * Quick filtering for interesting events:
387  */
388 static int class_filter(struct lock_class *class)
389 {
390 #if 0
391         /* Example */
392         if (class->name_version == 1 &&
393                         !strcmp(class->name, "lockname"))
394                 return 1;
395         if (class->name_version == 1 &&
396                         !strcmp(class->name, "&struct->lockfield"))
397                 return 1;
398 #endif
399         /* Filter everything else. Returning 1 here would allow everything else. */
400         return 0;
401 }
402 #endif
403
404 static int verbose(struct lock_class *class)
405 {
406 #if VERBOSE
407         return class_filter(class);
408 #endif
409         return 0;
410 }
411
412 /*
413  * Stack-trace: tightly packed array of stack backtrace
414  * addresses. Protected by the graph_lock.
415  */
416 unsigned long nr_stack_trace_entries;
417 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
418
419 static void print_lockdep_off(const char *bug_msg)
420 {
421         printk(KERN_DEBUG "%s\n", bug_msg);
422         printk(KERN_DEBUG "turning off the locking correctness validator.\n");
423 #ifdef CONFIG_LOCK_STAT
424         printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
425 #endif
426 }
427
428 static int save_trace(struct stack_trace *trace)
429 {
430         trace->nr_entries = 0;
431         trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
432         trace->entries = stack_trace + nr_stack_trace_entries;
433
434         trace->skip = 3;
435
436         save_stack_trace(trace);
437
438         /*
439          * Some daft arches put -1 at the end to indicate it's a full trace.
440          *
441          * <rant> this is buggy anyway, since it takes a whole extra entry so a
442          * complete trace that maxes out the entries provided will be reported
443          * as incomplete, friggin useless </rant>
444          */
445         if (trace->nr_entries != 0 &&
446             trace->entries[trace->nr_entries-1] == ULONG_MAX)
447                 trace->nr_entries--;
448
449         trace->max_entries = trace->nr_entries;
450
451         nr_stack_trace_entries += trace->nr_entries;
452
453         if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
454                 if (!debug_locks_off_graph_unlock())
455                         return 0;
456
457                 print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
458                 dump_stack();
459
460                 return 0;
461         }
462
463         return 1;
464 }
465
466 unsigned int nr_hardirq_chains;
467 unsigned int nr_softirq_chains;
468 unsigned int nr_process_chains;
469 unsigned int max_lockdep_depth;
470
471 #ifdef CONFIG_DEBUG_LOCKDEP
472 /*
473  * Various lockdep statistics:
474  */
475 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
476 #endif
477
478 /*
479  * Locking printouts:
480  */
481
482 #define __USAGE(__STATE)                                                \
483         [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
484         [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
485         [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
486         [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
487
488 static const char *usage_str[] =
489 {
490 #define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
491 #include "lockdep_states.h"
492 #undef LOCKDEP_STATE
493         [LOCK_USED] = "INITIAL USE",
494 };
495
496 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
497 {
498         return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
499 }
500
501 static inline unsigned long lock_flag(enum lock_usage_bit bit)
502 {
503         return 1UL << bit;
504 }
505
506 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
507 {
508         char c = '.';
509
510         if (class->usage_mask & lock_flag(bit + 2))
511                 c = '+';
512         if (class->usage_mask & lock_flag(bit)) {
513                 c = '-';
514                 if (class->usage_mask & lock_flag(bit + 2))
515                         c = '?';
516         }
517
518         return c;
519 }
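
/*
 * Meaning of the characters produced above, as they appear in the "{....}"
 * part of lockdep reports:
 *
 *   '.'  the class was never used in this irq context nor acquired with
 *        this irq type enabled
 *   '-'  the class was used in this irq context (irq-safe)
 *   '+'  the class was acquired with this irq type enabled (irq-unsafe)
 *   '?'  both of the above - the combination lockdep complains about
 */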
520
521 void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
522 {
523         int i = 0;
524
525 #define LOCKDEP_STATE(__STATE)                                          \
526         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
527         usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
528 #include "lockdep_states.h"
529 #undef LOCKDEP_STATE
530
531         usage[i] = '\0';
532 }
533
534 static void __print_lock_name(struct lock_class *class)
535 {
536         char str[KSYM_NAME_LEN];
537         const char *name;
538
539         name = class->name;
540         if (!name) {
541                 name = __get_key_name(class->key, str);
542                 printk(KERN_CONT "%s", name);
543         } else {
544                 printk(KERN_CONT "%s", name);
545                 if (class->name_version > 1)
546                         printk(KERN_CONT "#%d", class->name_version);
547                 if (class->subclass)
548                         printk(KERN_CONT "/%d", class->subclass);
549         }
550 }
551
552 static void print_lock_name(struct lock_class *class)
553 {
554         char usage[LOCK_USAGE_CHARS];
555
556         get_usage_chars(class, usage);
557
558         printk(KERN_CONT " (");
559         __print_lock_name(class);
560         printk(KERN_CONT "){%s}", usage);
561 }
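
/*
 * The resulting format looks like, for example:
 *
 *    (&sb->s_type->i_mutex_key#3){+.+.}
 *
 * i.e. the class name, optional "#name_version" and "/subclass" suffixes,
 * followed by the usage characters filled in by get_usage_chars().
 */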
562
563 static void print_lockdep_cache(struct lockdep_map *lock)
564 {
565         const char *name;
566         char str[KSYM_NAME_LEN];
567
568         name = lock->name;
569         if (!name)
570                 name = __get_key_name(lock->key->subkeys, str);
571
572         printk(KERN_CONT "%s", name);
573 }
574
575 static void print_lock(struct held_lock *hlock)
576 {
577         /*
578          * We can be called locklessly through debug_show_all_locks() so be
579          * extra careful, the hlock might have been released and cleared.
580          */
581         unsigned int class_idx = hlock->class_idx;
582
583         /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
584         barrier();
585
586         if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
587                 printk(KERN_CONT "<RELEASED>\n");
588                 return;
589         }
590
591         printk(KERN_CONT "%p", hlock->instance);
592         print_lock_name(lock_classes + class_idx - 1);
593         printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
594 }
595
596 static void lockdep_print_held_locks(struct task_struct *p)
597 {
598         int i, depth = READ_ONCE(p->lockdep_depth);
599
600         if (!depth)
601                 printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p));
602         else
603                 printk("%d lock%s held by %s/%d:\n", depth,
604                        depth > 1 ? "s" : "", p->comm, task_pid_nr(p));
605         /*
606          * It's not reliable to print a task's held locks if it's not sleeping
607          * and it's not the current task.
608          */
609         if (p->state == TASK_RUNNING && p != current)
610                 return;
611         for (i = 0; i < depth; i++) {
612                 printk(" #%d: ", i);
613                 print_lock(p->held_locks + i);
614         }
615 }
616
617 static void print_kernel_ident(void)
618 {
619         printk("%s %.*s %s\n", init_utsname()->release,
620                 (int)strcspn(init_utsname()->version, " "),
621                 init_utsname()->version,
622                 print_tainted());
623 }
624
625 static int very_verbose(struct lock_class *class)
626 {
627 #if VERY_VERBOSE
628         return class_filter(class);
629 #endif
630         return 0;
631 }
632
633 /*
634  * Is this the address of a static object:
635  */
636 #ifdef __KERNEL__
637 static int static_obj(void *obj)
638 {
639         unsigned long start = (unsigned long) &_stext,
640                       end   = (unsigned long) &_end,
641                       addr  = (unsigned long) obj;
642
643         /*
644          * static variable?
645          */
646         if ((addr >= start) && (addr < end))
647                 return 1;
648
649         if (arch_is_kernel_data(addr))
650                 return 1;
651
652         /*
653          * in-kernel percpu var?
654          */
655         if (is_kernel_percpu_address(addr))
656                 return 1;
657
658         /*
659          * module static or percpu var?
660          */
661         return is_module_address(addr) || is_module_percpu_address(addr);
662 }
663 #endif
664
665 /*
666  * To make lock name printouts unique, we calculate a unique
667  * class->name_version generation counter. The caller must hold the graph
668  * lock.
669  */
670 static int count_matching_names(struct lock_class *new_class)
671 {
672         struct lock_class *class;
673         int count = 0;
674
675         if (!new_class->name)
676                 return 0;
677
678         list_for_each_entry(class, &all_lock_classes, lock_entry) {
679                 if (new_class->key - new_class->subclass == class->key)
680                         return class->name_version;
681                 if (class->name && !strcmp(class->name, new_class->name))
682                         count = max(count, class->name_version);
683         }
684
685         return count + 1;
686 }
687
688 static inline struct lock_class *
689 look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
690 {
691         struct lockdep_subclass_key *key;
692         struct hlist_head *hash_head;
693         struct lock_class *class;
694
695         if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
696                 debug_locks_off();
697                 printk(KERN_ERR
698                         "BUG: looking up invalid subclass: %u\n", subclass);
699                 printk(KERN_ERR
700                         "turning off the locking correctness validator.\n");
701                 dump_stack();
702                 return NULL;
703         }
704
705         /*
706          * If it is not initialised then it has never been locked,
707          * so it won't be present in the hash table.
708          */
709         if (unlikely(!lock->key))
710                 return NULL;
711
712         /*
713          * NOTE: the class-key must be unique. For dynamic locks, a static
714          * lock_class_key variable is passed in through the mutex_init()
715          * (or spin_lock_init()) call - which acts as the key. For static
716          * locks we use the lock object itself as the key.
717          */
718         BUILD_BUG_ON(sizeof(struct lock_class_key) >
719                         sizeof(struct lockdep_map));
720
721         key = lock->key->subkeys + subclass;
722
723         hash_head = classhashentry(key);
724
725         /*
726          * We do an RCU walk of the hash, see lockdep_free_key_range().
727          */
728         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
729                 return NULL;
730
731         hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
732                 if (class->key == key) {
733                         /*
734                          * Huh! same key, different name? Did someone trample
735                          * on some memory? We're most confused.
736                          */
737                         WARN_ON_ONCE(class->name != lock->name);
738                         return class;
739                 }
740         }
741
742         return NULL;
743 }
744
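#if 0
/*
 * An illustration of the class-key rules spelled out above, using a
 * made-up structure: for dynamically allocated locks, spin_lock_init()
 * (or mutex_init()) declares a static struct lock_class_key at its call
 * site, so every lock initialized at that site belongs to one class.
 */
struct my_device {
        spinlock_t lock;
};

static void my_device_init(struct my_device *dev)
{
        spin_lock_init(&dev->lock);     /* the static key lives at this call site */
}
#endif
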
745 /*
746  * Static locks do not have their class-keys yet - for them the key is
747  * the lock object itself. If the lock is in the per cpu area, the
748  * canonical address of the lock (per cpu offset removed) is used.
749  */
750 static bool assign_lock_key(struct lockdep_map *lock)
751 {
752         unsigned long can_addr, addr = (unsigned long)lock;
753
754         if (__is_kernel_percpu_address(addr, &can_addr))
755                 lock->key = (void *)can_addr;
756         else if (__is_module_percpu_address(addr, &can_addr))
757                 lock->key = (void *)can_addr;
758         else if (static_obj(lock))
759                 lock->key = (void *)lock;
760         else {
761                 /* Debug-check: all keys must be persistent! */
762                 debug_locks_off();
763                 pr_err("INFO: trying to register non-static key.\n");
764                 pr_err("the code is fine but needs lockdep annotation.\n");
765                 pr_err("turning off the locking correctness validator.\n");
766                 dump_stack();
767                 return false;
768         }
769
770         return true;
771 }
772
773 /*
774  * Initialize the lock_classes[] array elements, the free_lock_classes list
775  * and also the delayed_free structure.
776  */
777 static void init_data_structures_once(void)
778 {
779         static bool initialization_happened;
780         int i;
781
782         if (likely(initialization_happened))
783                 return;
784
785         initialization_happened = true;
786
787         init_rcu_head(&delayed_free.rcu_head);
788         INIT_LIST_HEAD(&delayed_free.pf[0].zapped);
789         INIT_LIST_HEAD(&delayed_free.pf[1].zapped);
790
791         for (i = 0; i < ARRAY_SIZE(lock_classes); i++) {
792                 list_add_tail(&lock_classes[i].lock_entry, &free_lock_classes);
793                 INIT_LIST_HEAD(&lock_classes[i].locks_after);
794                 INIT_LIST_HEAD(&lock_classes[i].locks_before);
795         }
796 }
797
798 /*
799  * Register a lock's class in the hash-table, if the class is not present
800  * yet. Otherwise we look it up. We cache the result in the lock object
801  * itself, so the actual hash lookup should happen only once per lock object.
802  */
803 static struct lock_class *
804 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
805 {
806         struct lockdep_subclass_key *key;
807         struct hlist_head *hash_head;
808         struct lock_class *class;
809
810         DEBUG_LOCKS_WARN_ON(!irqs_disabled());
811
812         class = look_up_lock_class(lock, subclass);
813         if (likely(class))
814                 goto out_set_class_cache;
815
816         if (!lock->key) {
817                 if (!assign_lock_key(lock))
818                         return NULL;
819         } else if (!static_obj(lock->key)) {
820                 return NULL;
821         }
822
823         key = lock->key->subkeys + subclass;
824         hash_head = classhashentry(key);
825
826         if (!graph_lock()) {
827                 return NULL;
828         }
829         /*
830          * We have to do the hash-walk again, to avoid races
831          * with another CPU:
832          */
833         hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
834                 if (class->key == key)
835                         goto out_unlock_set;
836         }
837
838         init_data_structures_once();
839
840         /* Allocate a new lock class and add it to the hash. */
841         class = list_first_entry_or_null(&free_lock_classes, typeof(*class),
842                                          lock_entry);
843         if (!class) {
844                 if (!debug_locks_off_graph_unlock()) {
845                         return NULL;
846                 }
847
848                 print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
849                 dump_stack();
850                 return NULL;
851         }
852         nr_lock_classes++;
853         debug_atomic_inc(nr_unused_locks);
854         class->key = key;
855         class->name = lock->name;
856         class->subclass = subclass;
857         WARN_ON_ONCE(!list_empty(&class->locks_before));
858         WARN_ON_ONCE(!list_empty(&class->locks_after));
859         class->name_version = count_matching_names(class);
860         /*
861          * We use RCU's safe list-add method to make
862          * parallel walking of the hash-list safe:
863          */
864         hlist_add_head_rcu(&class->hash_entry, hash_head);
865         /*
866          * Remove the class from the free list and add it to the global list
867          * of classes.
868          */
869         list_move_tail(&class->lock_entry, &all_lock_classes);
870
871         if (verbose(class)) {
872                 graph_unlock();
873
874                 printk("\nnew class %px: %s", class->key, class->name);
875                 if (class->name_version > 1)
876                         printk(KERN_CONT "#%d", class->name_version);
877                 printk(KERN_CONT "\n");
878                 dump_stack();
879
880                 if (!graph_lock()) {
881                         return NULL;
882                 }
883         }
884 out_unlock_set:
885         graph_unlock();
886
887 out_set_class_cache:
888         if (!subclass || force)
889                 lock->class_cache[0] = class;
890         else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
891                 lock->class_cache[subclass] = class;
892
893         /*
894          * Hash collision, did we smoke some? We found a class with a matching
895          * hash but the subclass -- which is hashed in -- didn't match.
896          */
897         if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
898                 return NULL;
899
900         return class;
901 }
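
#if 0
/*
 * A sketch of where the @subclass argument comes from: it is what the
 * nested-locking annotations select, e.g. when two locks of the same
 * class are taken in a fixed order ("parent" and "child" are made up):
 */
        spin_lock(&parent->lock);                               /* subclass 0 */
        spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);   /* subclass 1 */
#endif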
902
903 #ifdef CONFIG_PROVE_LOCKING
904 /*
905  * Allocate a lockdep entry. (Assumes the graph_lock is held; returns
906  * NULL on failure.)
907  */
908 static struct lock_list *alloc_list_entry(void)
909 {
910         if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
911                 if (!debug_locks_off_graph_unlock())
912                         return NULL;
913
914                 print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
915                 dump_stack();
916                 return NULL;
917         }
918         return list_entries + nr_list_entries++;
919 }
920
921 /*
922  * Add a new dependency to the head of the list:
923  */
924 static int add_lock_to_list(struct lock_class *this,
925                             struct lock_class *links_to, struct list_head *head,
926                             unsigned long ip, int distance,
927                             struct stack_trace *trace)
928 {
929         struct lock_list *entry;
930         /*
931          * Lock not present yet - get a new dependency struct and
932          * add it to the list:
933          */
934         entry = alloc_list_entry();
935         if (!entry)
936                 return 0;
937
938         entry->class = this;
939         entry->links_to = links_to;
940         entry->distance = distance;
941         entry->trace = *trace;
942         /*
943          * Both allocation and removal are done under the graph lock; but
944          * iteration is under RCU-sched; see look_up_lock_class() and
945          * lockdep_free_key_range().
946          */
947         list_add_tail_rcu(&entry->entry, head);
948
949         return 1;
950 }
951
952 /*
953  * For efficient modular arithmetic, we use a power of 2 for the queue size.
954  */
955 #define MAX_CIRCULAR_QUEUE_SIZE         4096UL
956 #define CQ_MASK                         (MAX_CIRCULAR_QUEUE_SIZE-1)
957
958 /*
959  * The circular_queue and its helpers are used to implement the
960  * breadth-first search (BFS) algorithm, by which we can build
961  * the shortest path from the next lock to be acquired to a
962  * previously held lock, if there is a circular dependency between them.
963  */
964 struct circular_queue {
965         unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
966         unsigned int  front, rear;
967 };
968
969 static struct circular_queue lock_cq;
970
971 unsigned int max_bfs_queue_depth;
972
973 static unsigned int lockdep_dependency_gen_id;
974
975 static inline void __cq_init(struct circular_queue *cq)
976 {
977         cq->front = cq->rear = 0;
978         lockdep_dependency_gen_id++;
979 }
980
981 static inline int __cq_empty(struct circular_queue *cq)
982 {
983         return (cq->front == cq->rear);
984 }
985
986 static inline int __cq_full(struct circular_queue *cq)
987 {
988         return ((cq->rear + 1) & CQ_MASK) == cq->front;
989 }
990
991 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
992 {
993         if (__cq_full(cq))
994                 return -1;
995
996         cq->element[cq->rear] = elem;
997         cq->rear = (cq->rear + 1) & CQ_MASK;
998         return 0;
999 }
1000
1001 static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
1002 {
1003         if (__cq_empty(cq))
1004                 return -1;
1005
1006         *elem = cq->element[cq->front];
1007         cq->front = (cq->front + 1) & CQ_MASK;
1008         return 0;
1009 }
1010
1011 static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
1012 {
1013         return (cq->rear - cq->front) & CQ_MASK;
1014 }
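
#if 0
/*
 * An illustration of the power-of-2 wrap-around used by the helpers
 * above: with MAX_CIRCULAR_QUEUE_SIZE == 4096, an index of 4095 wraps
 * back to 0, and (rear - front) & CQ_MASK stays correct across the wrap.
 */
        unsigned int rear = 4095;

        rear = (rear + 1) & CQ_MASK;    /* == 0 */
#endif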
1015
1016 static inline void mark_lock_accessed(struct lock_list *lock,
1017                                         struct lock_list *parent)
1018 {
1019         unsigned long nr;
1020
1021         nr = lock - list_entries;
1022         WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
1023         lock->parent = parent;
1024         lock->class->dep_gen_id = lockdep_dependency_gen_id;
1025 }
1026
1027 static inline unsigned long lock_accessed(struct lock_list *lock)
1028 {
1029         unsigned long nr;
1030
1031         nr = lock - list_entries;
1032         WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
1033         return lock->class->dep_gen_id == lockdep_dependency_gen_id;
1034 }
1035
1036 static inline struct lock_list *get_lock_parent(struct lock_list *child)
1037 {
1038         return child->parent;
1039 }
1040
1041 static inline int get_lock_depth(struct lock_list *child)
1042 {
1043         int depth = 0;
1044         struct lock_list *parent;
1045
1046         while ((parent = get_lock_parent(child))) {
1047                 child = parent;
1048                 depth++;
1049         }
1050         return depth;
1051 }
1052
1053 static int __bfs(struct lock_list *source_entry,
1054                  void *data,
1055                  int (*match)(struct lock_list *entry, void *data),
1056                  struct lock_list **target_entry,
1057                  int forward)
1058 {
1059         struct lock_list *entry;
1060         struct list_head *head;
1061         struct circular_queue *cq = &lock_cq;
1062         int ret = 1;
1063
1064         if (match(source_entry, data)) {
1065                 *target_entry = source_entry;
1066                 ret = 0;
1067                 goto exit;
1068         }
1069
1070         if (forward)
1071                 head = &source_entry->class->locks_after;
1072         else
1073                 head = &source_entry->class->locks_before;
1074
1075         if (list_empty(head))
1076                 goto exit;
1077
1078         __cq_init(cq);
1079         __cq_enqueue(cq, (unsigned long)source_entry);
1080
1081         while (!__cq_empty(cq)) {
1082                 struct lock_list *lock;
1083
1084                 __cq_dequeue(cq, (unsigned long *)&lock);
1085
1086                 if (!lock->class) {
1087                         ret = -2;
1088                         goto exit;
1089                 }
1090
1091                 if (forward)
1092                         head = &lock->class->locks_after;
1093                 else
1094                         head = &lock->class->locks_before;
1095
1096                 DEBUG_LOCKS_WARN_ON(!irqs_disabled());
1097
1098                 list_for_each_entry_rcu(entry, head, entry) {
1099                         if (!lock_accessed(entry)) {
1100                                 unsigned int cq_depth;
1101                                 mark_lock_accessed(entry, lock);
1102                                 if (match(entry, data)) {
1103                                         *target_entry = entry;
1104                                         ret = 0;
1105                                         goto exit;
1106                                 }
1107
1108                                 if (__cq_enqueue(cq, (unsigned long)entry)) {
1109                                         ret = -1;
1110                                         goto exit;
1111                                 }
1112                                 cq_depth = __cq_get_elem_count(cq);
1113                                 if (max_bfs_queue_depth < cq_depth)
1114                                         max_bfs_queue_depth = cq_depth;
1115                         }
1116                 }
1117         }
1118 exit:
1119         return ret;
1120 }
1121
1122 static inline int __bfs_forwards(struct lock_list *src_entry,
1123                         void *data,
1124                         int (*match)(struct lock_list *entry, void *data),
1125                         struct lock_list **target_entry)
1126 {
1127         return __bfs(src_entry, data, match, target_entry, 1);
1128
1129 }
1130
1131 static inline int __bfs_backwards(struct lock_list *src_entry,
1132                         void *data,
1133                         int (*match)(struct lock_list *entry, void *data),
1134                         struct lock_list **target_entry)
1135 {
1136         return __bfs(src_entry, data, match, target_entry, 0);
1137
1138 }
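
#if 0
/*
 * A sketch of the calling convention, mirroring the users further down
 * (e.g. check_noncircular() and lockdep_count_forward_deps()); "some_class",
 * "data" and "match" are placeholders: the search root is a lock_list with
 * no parent, and a 0 return means a match was found and *target_entry
 * points at it.
 */
        struct lock_list root = { .parent = NULL, .class = some_class };
        struct lock_list *target_entry;
        int ret;

        ret = __bfs_forwards(&root, data, match, &target_entry);
#endif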
1139
1140 /*
1141  * Forwards-direction lock-dependency checking (implemented iteratively
1141  * via BFS), used for
1142  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
1143  * checking.
1144  */
1145
1146 /*
1147  * Print a dependency chain entry (this is only done when a deadlock
1148  * has been detected):
1149  */
1150 static noinline int
1151 print_circular_bug_entry(struct lock_list *target, int depth)
1152 {
1153         if (debug_locks_silent)
1154                 return 0;
1155         printk("\n-> #%u", depth);
1156         print_lock_name(target->class);
1157         printk(KERN_CONT ":\n");
1158         print_stack_trace(&target->trace, 6);
1159
1160         return 0;
1161 }
1162
1163 static void
1164 print_circular_lock_scenario(struct held_lock *src,
1165                              struct held_lock *tgt,
1166                              struct lock_list *prt)
1167 {
1168         struct lock_class *source = hlock_class(src);
1169         struct lock_class *target = hlock_class(tgt);
1170         struct lock_class *parent = prt->class;
1171
1172          * When the unsafe_class lock is taken directly under the
1173          * safe_class lock, all we need to show is the deadlock
1174          * scenario, as it is obvious that the unsafe lock is taken
1175          * under the safe lock.
1176          *
1177          * But if there is a chain instead, where the safe lock takes
1178          * an intermediate lock (middle_class) that is not the same as
1179          * the safe lock, then the lock chain is used to describe the
1180          * problem. Otherwise we would need to show a different CPU
1181          * case for each link in the chain from the safe_class lock to
1182          * the unsafe_class lock.
1183          * from the safe_class lock to the unsafe_class lock.
1184          */
1185         if (parent != source) {
1186                 printk("Chain exists of:\n  ");
1187                 __print_lock_name(source);
1188                 printk(KERN_CONT " --> ");
1189                 __print_lock_name(parent);
1190                 printk(KERN_CONT " --> ");
1191                 __print_lock_name(target);
1192                 printk(KERN_CONT "\n\n");
1193         }
1194
1195         printk(" Possible unsafe locking scenario:\n\n");
1196         printk("       CPU0                    CPU1\n");
1197         printk("       ----                    ----\n");
1198         printk("  lock(");
1199         __print_lock_name(target);
1200         printk(KERN_CONT ");\n");
1201         printk("                               lock(");
1202         __print_lock_name(parent);
1203         printk(KERN_CONT ");\n");
1204         printk("                               lock(");
1205         __print_lock_name(target);
1206         printk(KERN_CONT ");\n");
1207         printk("  lock(");
1208         __print_lock_name(source);
1209         printk(KERN_CONT ");\n");
1210         printk("\n *** DEADLOCK ***\n\n");
1211 }
1212
1213 /*
1214  * When a circular dependency is detected, print the
1215  * header first:
1216  */
1217 static noinline int
1218 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1219                         struct held_lock *check_src,
1220                         struct held_lock *check_tgt)
1221 {
1222         struct task_struct *curr = current;
1223
1224         if (debug_locks_silent)
1225                 return 0;
1226
1227         pr_warn("\n");
1228         pr_warn("======================================================\n");
1229         pr_warn("WARNING: possible circular locking dependency detected\n");
1230         print_kernel_ident();
1231         pr_warn("------------------------------------------------------\n");
1232         pr_warn("%s/%d is trying to acquire lock:\n",
1233                 curr->comm, task_pid_nr(curr));
1234         print_lock(check_src);
1235
1236         pr_warn("\nbut task is already holding lock:\n");
1237
1238         print_lock(check_tgt);
1239         pr_warn("\nwhich lock already depends on the new lock.\n\n");
1240         pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
1241
1242         print_circular_bug_entry(entry, depth);
1243
1244         return 0;
1245 }
1246
1247 static inline int class_equal(struct lock_list *entry, void *data)
1248 {
1249         return entry->class == data;
1250 }
1251
1252 static noinline int print_circular_bug(struct lock_list *this,
1253                                 struct lock_list *target,
1254                                 struct held_lock *check_src,
1255                                 struct held_lock *check_tgt,
1256                                 struct stack_trace *trace)
1257 {
1258         struct task_struct *curr = current;
1259         struct lock_list *parent;
1260         struct lock_list *first_parent;
1261         int depth;
1262
1263         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1264                 return 0;
1265
1266         if (!save_trace(&this->trace))
1267                 return 0;
1268
1269         depth = get_lock_depth(target);
1270
1271         print_circular_bug_header(target, depth, check_src, check_tgt);
1272
1273         parent = get_lock_parent(target);
1274         first_parent = parent;
1275
1276         while (parent) {
1277                 print_circular_bug_entry(parent, --depth);
1278                 parent = get_lock_parent(parent);
1279         }
1280
1281         printk("\nother info that might help us debug this:\n\n");
1282         print_circular_lock_scenario(check_src, check_tgt,
1283                                      first_parent);
1284
1285         lockdep_print_held_locks(curr);
1286
1287         printk("\nstack backtrace:\n");
1288         dump_stack();
1289
1290         return 0;
1291 }
1292
1293 static noinline int print_bfs_bug(int ret)
1294 {
1295         if (!debug_locks_off_graph_unlock())
1296                 return 0;
1297
1298         /*
1299          * Breadth-first-search failed, graph got corrupted?
1300          */
1301         WARN(1, "lockdep bfs error:%d\n", ret);
1302
1303         return 0;
1304 }
1305
1306 static int noop_count(struct lock_list *entry, void *data)
1307 {
1308         (*(unsigned long *)data)++;
1309         return 0;
1310 }
1311
1312 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
1313 {
1314         unsigned long  count = 0;
1315         struct lock_list *uninitialized_var(target_entry);
1316
1317         __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1318
1319         return count;
1320 }
1321 unsigned long lockdep_count_forward_deps(struct lock_class *class)
1322 {
1323         unsigned long ret, flags;
1324         struct lock_list this;
1325
1326         this.parent = NULL;
1327         this.class = class;
1328
1329         raw_local_irq_save(flags);
1330         arch_spin_lock(&lockdep_lock);
1331         ret = __lockdep_count_forward_deps(&this);
1332         arch_spin_unlock(&lockdep_lock);
1333         raw_local_irq_restore(flags);
1334
1335         return ret;
1336 }
1337
1338 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1339 {
1340         unsigned long  count = 0;
1341         struct lock_list *uninitialized_var(target_entry);
1342
1343         __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1344
1345         return count;
1346 }
1347
1348 unsigned long lockdep_count_backward_deps(struct lock_class *class)
1349 {
1350         unsigned long ret, flags;
1351         struct lock_list this;
1352
1353         this.parent = NULL;
1354         this.class = class;
1355
1356         raw_local_irq_save(flags);
1357         arch_spin_lock(&lockdep_lock);
1358         ret = __lockdep_count_backward_deps(&this);
1359         arch_spin_unlock(&lockdep_lock);
1360         raw_local_irq_restore(flags);
1361
1362         return ret;
1363 }
1364
1365 /*
1366  * Prove that the dependency graph starting at <entry> can not
1367  * lead to <target>. Print an error and return 0 if it does.
1368  */
1369 static noinline int
1370 check_noncircular(struct lock_list *root, struct lock_class *target,
1371                 struct lock_list **target_entry)
1372 {
1373         int result;
1374
1375         debug_atomic_inc(nr_cyclic_checks);
1376
1377         result = __bfs_forwards(root, target, class_equal, target_entry);
1378
1379         return result;
1380 }
1381
1382 static noinline int
1383 check_redundant(struct lock_list *root, struct lock_class *target,
1384                 struct lock_list **target_entry)
1385 {
1386         int result;
1387
1388         debug_atomic_inc(nr_redundant_checks);
1389
1390         result = __bfs_forwards(root, target, class_equal, target_entry);
1391
1392         return result;
1393 }
1394
1395 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
1396 /*
1397  * Forwards and backwards subgraph searching, for the purposes of
1398  * proving that two subgraphs can be connected by a new dependency
1399  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1400  */
1401
1402 static inline int usage_match(struct lock_list *entry, void *bit)
1403 {
1404         return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1405 }
1406
1407
1408
1409 /*
1410  * Find a node in the forwards-direction dependency sub-graph starting
1411  * at @root->class that matches @bit.
1412  *
1413  * Return 0 if such a node exists in the subgraph, and put that node
1414  * into *@target_entry.
1415  *
1416  * Return 1 otherwise and keep *@target_entry unchanged.
1417  * Return <0 on error.
1418  */
1419 static int
1420 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1421                         struct lock_list **target_entry)
1422 {
1423         int result;
1424
1425         debug_atomic_inc(nr_find_usage_forwards_checks);
1426
1427         result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1428
1429         return result;
1430 }
1431
1432 /*
1433  * Find a node in the backwards-direction dependency sub-graph starting
1434  * at @root->class that matches @bit.
1435  *
1436  * Return 0 if such a node exists in the subgraph, and put that node
1437  * into *@target_entry.
1438  *
1439  * Return 1 otherwise and keep *@target_entry unchanged.
1440  * Return <0 on error.
1441  */
1442 static int
1443 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1444                         struct lock_list **target_entry)
1445 {
1446         int result;
1447
1448         debug_atomic_inc(nr_find_usage_backwards_checks);
1449
1450         result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1451
1452         return result;
1453 }
1454
1455 static void print_lock_class_header(struct lock_class *class, int depth)
1456 {
1457         int bit;
1458
1459         printk("%*s->", depth, "");
1460         print_lock_name(class);
1461 #ifdef CONFIG_DEBUG_LOCKDEP
1462         printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
1463 #endif
1464         printk(KERN_CONT " {\n");
1465
1466         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1467                 if (class->usage_mask & (1 << bit)) {
1468                         int len = depth;
1469
1470                         len += printk("%*s   %s", depth, "", usage_str[bit]);
1471                         len += printk(KERN_CONT " at:\n");
1472                         print_stack_trace(class->usage_traces + bit, len);
1473                 }
1474         }
1475         printk("%*s }\n", depth, "");
1476
1477         printk("%*s ... key      at: [<%px>] %pS\n",
1478                 depth, "", class->key, class->key);
1479 }
1480
1481 /*
1482  * printk the shortest lock dependencies from @leaf to @root in reverse order:
1483  */
1484 static void __used
1485 print_shortest_lock_dependencies(struct lock_list *leaf,
1486                                 struct lock_list *root)
1487 {
1488         struct lock_list *entry = leaf;
1489         int depth;
1490
1491         /* compute the depth from the tree generated by BFS */
1492         depth = get_lock_depth(leaf);
1493
1494         do {
1495                 print_lock_class_header(entry->class, depth);
1496                 printk("%*s ... acquired at:\n", depth, "");
1497                 print_stack_trace(&entry->trace, 2);
1498                 printk("\n");
1499
1500                 if (depth == 0 && (entry != root)) {
1501                         printk("lockdep:%s bad path found in chain graph\n", __func__);
1502                         break;
1503                 }
1504
1505                 entry = get_lock_parent(entry);
1506                 depth--;
1507         } while (entry && (depth >= 0));
1508
1509         return;
1510 }
1511
1512 static void
1513 print_irq_lock_scenario(struct lock_list *safe_entry,
1514                         struct lock_list *unsafe_entry,
1515                         struct lock_class *prev_class,
1516                         struct lock_class *next_class)
1517 {
1518         struct lock_class *safe_class = safe_entry->class;
1519         struct lock_class *unsafe_class = unsafe_entry->class;
1520         struct lock_class *middle_class = prev_class;
1521
1522         if (middle_class == safe_class)
1523                 middle_class = next_class;
1524
1525         /*
1526          * When the unsafe_class lock is taken directly under the
1527          * safe_class lock, all we need to show is the deadlock
1528          * scenario, as it is obvious that the unsafe lock is taken
1529          * under the safe lock.
1530          *
1531          * But if there is a chain instead, where the safe lock takes
1532          * an intermediate lock (middle_class) that is not the same as
1533          * the safe lock, then the lock chain is used to describe the
1534          * problem. Otherwise we would need to show a different CPU
1535          * case for each link in the chain from the safe_class lock to
1536          * the unsafe_class lock.
1537          */
1538         if (middle_class != unsafe_class) {
1539                 printk("Chain exists of:\n  ");
1540                 __print_lock_name(safe_class);
1541                 printk(KERN_CONT " --> ");
1542                 __print_lock_name(middle_class);
1543                 printk(KERN_CONT " --> ");
1544                 __print_lock_name(unsafe_class);
1545                 printk(KERN_CONT "\n\n");
1546         }
1547
1548         printk(" Possible interrupt unsafe locking scenario:\n\n");
1549         printk("       CPU0                    CPU1\n");
1550         printk("       ----                    ----\n");
1551         printk("  lock(");
1552         __print_lock_name(unsafe_class);
1553         printk(KERN_CONT ");\n");
1554         printk("                               local_irq_disable();\n");
1555         printk("                               lock(");
1556         __print_lock_name(safe_class);
1557         printk(KERN_CONT ");\n");
1558         printk("                               lock(");
1559         __print_lock_name(middle_class);
1560         printk(KERN_CONT ");\n");
1561         printk("  <Interrupt>\n");
1562         printk("    lock(");
1563         __print_lock_name(safe_class);
1564         printk(KERN_CONT ");\n");
1565         printk("\n *** DEADLOCK ***\n\n");
1566 }
1567
1568 static int
1569 print_bad_irq_dependency(struct task_struct *curr,
1570                          struct lock_list *prev_root,
1571                          struct lock_list *next_root,
1572                          struct lock_list *backwards_entry,
1573                          struct lock_list *forwards_entry,
1574                          struct held_lock *prev,
1575                          struct held_lock *next,
1576                          enum lock_usage_bit bit1,
1577                          enum lock_usage_bit bit2,
1578                          const char *irqclass)
1579 {
1580         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1581                 return 0;
1582
1583         pr_warn("\n");
1584         pr_warn("=====================================================\n");
1585         pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
1586                 irqclass, irqclass);
1587         print_kernel_ident();
1588         pr_warn("-----------------------------------------------------\n");
1589         pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
1590                 curr->comm, task_pid_nr(curr),
1591                 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1592                 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1593                 curr->hardirqs_enabled,
1594                 curr->softirqs_enabled);
1595         print_lock(next);
1596
1597         pr_warn("\nand this task is already holding:\n");
1598         print_lock(prev);
1599         pr_warn("which would create a new lock dependency:\n");
1600         print_lock_name(hlock_class(prev));
1601         pr_cont(" ->");
1602         print_lock_name(hlock_class(next));
1603         pr_cont("\n");
1604
1605         pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
1606                 irqclass);
1607         print_lock_name(backwards_entry->class);
1608         pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
1609
1610         print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1611
1612         pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
1613         print_lock_name(forwards_entry->class);
1614         pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
1615         pr_warn("...");
1616
1617         print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1618
1619         pr_warn("\nother info that might help us debug this:\n\n");
1620         print_irq_lock_scenario(backwards_entry, forwards_entry,
1621                                 hlock_class(prev), hlock_class(next));
1622
1623         lockdep_print_held_locks(curr);
1624
1625         pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
1626         if (!save_trace(&prev_root->trace))
1627                 return 0;
1628         print_shortest_lock_dependencies(backwards_entry, prev_root);
1629
1630         pr_warn("\nthe dependencies between the lock to be acquired");
1631         pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
1632         if (!save_trace(&next_root->trace))
1633                 return 0;
1634         print_shortest_lock_dependencies(forwards_entry, next_root);
1635
1636         pr_warn("\nstack backtrace:\n");
1637         dump_stack();
1638
1639         return 0;
1640 }
1641
1642 static int
1643 check_usage(struct task_struct *curr, struct held_lock *prev,
1644             struct held_lock *next, enum lock_usage_bit bit_backwards,
1645             enum lock_usage_bit bit_forwards, const char *irqclass)
1646 {
1647         int ret;
1648         struct lock_list this, that;
1649         struct lock_list *uninitialized_var(target_entry);
1650         struct lock_list *uninitialized_var(target_entry1);
1651
1652         this.parent = NULL;
1653
1654         this.class = hlock_class(prev);
1655         ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1656         if (ret < 0)
1657                 return print_bfs_bug(ret);
1658         if (ret == 1)
1659                 return ret;
1660
1661         that.parent = NULL;
1662         that.class = hlock_class(next);
1663         ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1664         if (ret < 0)
1665                 return print_bfs_bug(ret);
1666         if (ret == 1)
1667                 return ret;
1668
1669         return print_bad_irq_dependency(curr, &this, &that,
1670                         target_entry, target_entry1,
1671                         prev, next,
1672                         bit_backwards, bit_forwards, irqclass);
1673 }
1674
1675 static const char *state_names[] = {
1676 #define LOCKDEP_STATE(__STATE) \
1677         __stringify(__STATE),
1678 #include "lockdep_states.h"
1679 #undef LOCKDEP_STATE
1680 };
1681
1682 static const char *state_rnames[] = {
1683 #define LOCKDEP_STATE(__STATE) \
1684         __stringify(__STATE)"-READ",
1685 #include "lockdep_states.h"
1686 #undef LOCKDEP_STATE
1687 };
1688
1689 static inline const char *state_name(enum lock_usage_bit bit)
1690 {
1691         return (bit & LOCK_USAGE_READ_MASK) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1692 }
1693
1694 static int exclusive_bit(int new_bit)
1695 {
1696         int state = new_bit & LOCK_USAGE_STATE_MASK;
1697         int dir = new_bit & LOCK_USAGE_DIR_MASK;
1698
1699         /*
1700          * keep state, bit flip the direction and strip read.
1701          */
1702         return state | (dir ^ LOCK_USAGE_DIR_MASK);
1703 }
1704
1705 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1706                            struct held_lock *next, enum lock_usage_bit bit)
1707 {
1708         /*
1709          * Prove that the new dependency does not connect a hardirq-safe
1710          * lock with a hardirq-unsafe lock - to achieve this we search
1711          * the backwards-subgraph starting at <prev>, and the
1712          * forwards-subgraph starting at <next>:
1713          */
1714         if (!check_usage(curr, prev, next, bit,
1715                            exclusive_bit(bit), state_name(bit)))
1716                 return 0;
1717
1718         bit++; /* _READ */
1719
1720         /*
1721          * Prove that the new dependency does not connect a hardirq-safe-read
1722          * lock with a hardirq-unsafe lock - to achieve this we search
1723          * the backwards-subgraph starting at <prev>, and the
1724          * forwards-subgraph starting at <next>:
1725          */
1726         if (!check_usage(curr, prev, next, bit,
1727                            exclusive_bit(bit), state_name(bit)))
1728                 return 0;
1729
1730         return 1;
1731 }
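/*
 * For illustration only (the real report is built by
 * print_irq_lock_scenario()): the classic pattern the check above rejects
 * is a hardirq-safe lock A being made to depend on a hardirq-unsafe lock B:
 *
 *	CPU0				CPU1
 *	----				----
 *	lock(B);
 *					lock(A);
 *					lock(B);
 *	<hardirq>
 *	  lock(A);
 *
 *			*** DEADLOCK ***
 */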
1732
1733 static int
1734 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1735                 struct held_lock *next)
1736 {
1737 #define LOCKDEP_STATE(__STATE)                                          \
1738         if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1739                 return 0;
1740 #include "lockdep_states.h"
1741 #undef LOCKDEP_STATE
1742
1743         return 1;
1744 }
1745
1746 static void inc_chains(void)
1747 {
1748         if (current->hardirq_context)
1749                 nr_hardirq_chains++;
1750         else {
1751                 if (current->softirq_context)
1752                         nr_softirq_chains++;
1753                 else
1754                         nr_process_chains++;
1755         }
1756 }
1757
1758 #else
1759
1760 static inline int
1761 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1762                 struct held_lock *next)
1763 {
1764         return 1;
1765 }
1766
1767 static inline void inc_chains(void)
1768 {
1769         nr_process_chains++;
1770 }
1771
1772 #endif
1773
1774 static void
1775 print_deadlock_scenario(struct held_lock *nxt,
1776                              struct held_lock *prv)
1777 {
1778         struct lock_class *next = hlock_class(nxt);
1779         struct lock_class *prev = hlock_class(prv);
1780
1781         printk(" Possible unsafe locking scenario:\n\n");
1782         printk("       CPU0\n");
1783         printk("       ----\n");
1784         printk("  lock(");
1785         __print_lock_name(prev);
1786         printk(KERN_CONT ");\n");
1787         printk("  lock(");
1788         __print_lock_name(next);
1789         printk(KERN_CONT ");\n");
1790         printk("\n *** DEADLOCK ***\n\n");
1791         printk(" May be due to missing lock nesting notation\n\n");
1792 }
1793
1794 static int
1795 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1796                    struct held_lock *next)
1797 {
1798         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1799                 return 0;
1800
1801         pr_warn("\n");
1802         pr_warn("============================================\n");
1803         pr_warn("WARNING: possible recursive locking detected\n");
1804         print_kernel_ident();
1805         pr_warn("--------------------------------------------\n");
1806         pr_warn("%s/%d is trying to acquire lock:\n",
1807                 curr->comm, task_pid_nr(curr));
1808         print_lock(next);
1809         pr_warn("\nbut task is already holding lock:\n");
1810         print_lock(prev);
1811
1812         pr_warn("\nother info that might help us debug this:\n");
1813         print_deadlock_scenario(next, prev);
1814         lockdep_print_held_locks(curr);
1815
1816         pr_warn("\nstack backtrace:\n");
1817         dump_stack();
1818
1819         return 0;
1820 }
1821
1822 /*
1823  * Check whether we are holding such a class already.
1824  *
1825  * (Note that this has to be done separately, because the graph cannot
1826  * detect such classes of deadlocks.)
1827  *
1828  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1829  */
1830 static int
1831 check_deadlock(struct task_struct *curr, struct held_lock *next,
1832                struct lockdep_map *next_instance, int read)
1833 {
1834         struct held_lock *prev;
1835         struct held_lock *nest = NULL;
1836         int i;
1837
1838         for (i = 0; i < curr->lockdep_depth; i++) {
1839                 prev = curr->held_locks + i;
1840
1841                 if (prev->instance == next->nest_lock)
1842                         nest = prev;
1843
1844                 if (hlock_class(prev) != hlock_class(next))
1845                         continue;
1846
1847                 /*
1848                  * Allow read-after-read recursion of the same
1849                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
1850                  */
1851                 if ((read == 2) && prev->read)
1852                         return 2;
1853
1854                 /*
1855                  * We're holding the nest_lock, which serializes this lock's
1856                  * nesting behaviour.
1857                  */
1858                 if (nest)
1859                         return 2;
1860
1861                 return print_deadlock_bug(curr, prev, next);
1862         }
1863         return 1;
1864 }
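/*
 * Illustration (hypothetical call sequences): plain same-class recursion,
 *
 *	mutex_lock(&m);
 *	mutex_lock(&m);		<- same class already held: reported above
 *
 * is flagged as a deadlock, whereas recursive-read recursion,
 *
 *	read_lock(&rw);
 *	read_lock(&rw);		<- read == 2 && prev->read: returns 2
 *
 * and re-acquisition under the declared nest_lock are both allowed.
 */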
1865
1866 /*
1867  * There was a chain-cache miss, and we are about to add a new dependency
1868  * to a previous lock. We recursively validate the following rules:
1869  *
1870  *  - would the adding of the <prev> -> <next> dependency create a
1871  *    circular dependency in the graph? [== circular deadlock]
1872  *
1873  *  - does the new prev->next dependency connect any hardirq-safe lock
1874  *    (in the full backwards-subgraph starting at <prev>) with any
1875  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1876  *    <next>)? [== illegal lock inversion with hardirq contexts]
1877  *
1878  *  - does the new prev->next dependency connect any softirq-safe lock
1879  *    (in the full backwards-subgraph starting at <prev>) with any
1880  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1881  *    <next>)? [== illegal lock inversion with softirq contexts]
1882  *
1883  * any of these scenarios could lead to a deadlock.
1884  *
1885  * Then if all the validations pass, we add the forwards and backwards
1886  * dependency.
1887  */
1888 static int
1889 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1890                struct held_lock *next, int distance, struct stack_trace *trace,
1891                int (*save)(struct stack_trace *trace))
1892 {
1893         struct lock_list *uninitialized_var(target_entry);
1894         struct lock_list *entry;
1895         struct lock_list this;
1896         int ret;
1897
1898         if (!hlock_class(prev)->key || !hlock_class(next)->key) {
1899                 /*
1900                  * The warning statements below may trigger a use-after-free
1901                  * of the class name. It is better to trigger a use-after-free
1902                  * and to have the class name most of the time instead of not
1903                  * having the class name available.
1904                  */
1905                 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
1906                           "Detected use-after-free of lock class %px/%s\n",
1907                           hlock_class(prev),
1908                           hlock_class(prev)->name);
1909                 WARN_ONCE(!debug_locks_silent && !hlock_class(next)->key,
1910                           "Detected use-after-free of lock class %px/%s\n",
1911                           hlock_class(next),
1912                           hlock_class(next)->name);
1913                 return 2;
1914         }
1915
1916         /*
1917          * Prove that the new <prev> -> <next> dependency would not
1918          * create a circular dependency in the graph. (We do this by
1919          * forward-recursing into the graph starting at <next>, and
1920          * checking whether we can reach <prev>.)
1921          *
1922          * We are using global variables to control the recursion, to
1923          * keep the stackframe size of the recursive functions low:
1924          */
1925         this.class = hlock_class(next);
1926         this.parent = NULL;
1927         ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1928         if (unlikely(!ret)) {
1929                 if (!trace->entries) {
1930                         /*
1931                          * If @save fails here, the printing might trigger
1932                          * a WARN but because of the !nr_entries it should
1933                          * not do bad things.
1934                          */
1935                         save(trace);
1936                 }
1937                 return print_circular_bug(&this, target_entry, next, prev, trace);
1938         }
1939         else if (unlikely(ret < 0))
1940                 return print_bfs_bug(ret);
1941
1942         if (!check_prev_add_irq(curr, prev, next))
1943                 return 0;
1944
1945         /*
1946          * For recursive read-locks we do all the dependency checks,
1947          * but we dont store read-triggered dependencies (only
1948          * write-triggered dependencies). This ensures that only the
1949          * write-side dependencies matter, and that if for example a
1950          * write-lock never takes any other locks, then the reads are
1951          * equivalent to a NOP.
1952          */
1953         if (next->read == 2 || prev->read == 2)
1954                 return 1;
1955         /*
1956          * Is the <prev> -> <next> dependency already present?
1957          *
1958          * (this may occur even though this is a new chain: consider
1959          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1960          *  chains - the second one will be new, but L1 already has
1961          *  L2 added to its dependency list, due to the first chain.)
1962          */
1963         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1964                 if (entry->class == hlock_class(next)) {
1965                         if (distance == 1)
1966                                 entry->distance = 1;
1967                         return 1;
1968                 }
1969         }
1970
1971         /*
1972          * Is the <prev> -> <next> link redundant?
1973          */
1974         this.class = hlock_class(prev);
1975         this.parent = NULL;
1976         ret = check_redundant(&this, hlock_class(next), &target_entry);
1977         if (!ret) {
1978                 debug_atomic_inc(nr_redundant);
1979                 return 2;
1980         }
1981         if (ret < 0)
1982                 return print_bfs_bug(ret);
1983
1984
1985         if (!trace->entries && !save(trace))
1986                 return 0;
1987
1988         /*
1989          * Ok, all validations passed, add the new lock
1990          * to the previous lock's dependency list:
1991          */
1992         ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1993                                &hlock_class(prev)->locks_after,
1994                                next->acquire_ip, distance, trace);
1995
1996         if (!ret)
1997                 return 0;
1998
1999         ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
2000                                &hlock_class(next)->locks_before,
2001                                next->acquire_ip, distance, trace);
2002         if (!ret)
2003                 return 0;
2004
2005         return 2;
2006 }
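/*
 * Note on the two add_lock_to_list() calls above: every validated
 * <prev> -> <next> dependency is recorded twice - once in <prev>'s
 * locks_after list and once in <next>'s locks_before list - so that the
 * BFS helpers can walk the dependency graph in either direction.
 */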
2007
2008 /*
2009  * Add the dependency to all directly-previous locks that are 'relevant'.
2010  * The ones that are relevant are (in increasing distance from curr):
2011  * all consecutive trylock entries and the final non-trylock entry - or
2012  * the end of this context's lock-chain - whichever comes first.
2013  */
2014 static int
2015 check_prevs_add(struct task_struct *curr, struct held_lock *next)
2016 {
2017         int depth = curr->lockdep_depth;
2018         struct held_lock *hlock;
2019         struct stack_trace trace = {
2020                 .nr_entries = 0,
2021                 .max_entries = 0,
2022                 .entries = NULL,
2023                 .skip = 0,
2024         };
2025
2026         /*
2027          * Debugging checks.
2028          *
2029          * Depth must not be zero for a non-head lock:
2030          */
2031         if (!depth)
2032                 goto out_bug;
2033         /*
2034          * At least two relevant locks must exist for this
2035          * to be a head:
2036          */
2037         if (curr->held_locks[depth].irq_context !=
2038                         curr->held_locks[depth-1].irq_context)
2039                 goto out_bug;
2040
2041         for (;;) {
2042                 int distance = curr->lockdep_depth - depth + 1;
2043                 hlock = curr->held_locks + depth - 1;
2044
2045                 /*
2046                  * Only non-recursive-read entries get new dependencies
2047                  * added:
2048                  */
2049                 if (hlock->read != 2 && hlock->check) {
2050                         int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
2051                         if (!ret)
2052                                 return 0;
2053
2054                         /*
2055                          * Stop after the first non-trylock entry,
2056                          * as non-trylock entries have added their
2057                          * own direct dependencies already, so this
2058                          * lock is connected to them indirectly:
2059                          */
2060                         if (!hlock->trylock)
2061                                 break;
2062                 }
2063
2064                 depth--;
2065                 /*
2066                  * End of lock-stack?
2067                  */
2068                 if (!depth)
2069                         break;
2070                 /*
2071                  * Stop the search if we cross into another context:
2072                  */
2073                 if (curr->held_locks[depth].irq_context !=
2074                                 curr->held_locks[depth-1].irq_context)
2075                         break;
2076         }
2077         return 1;
2078 out_bug:
2079         if (!debug_locks_off_graph_unlock())
2080                 return 0;
2081
2082         /*
2083          * Clearly we all shouldn't be here, but since we made it we
2084          * can reliably say we messed up our state. See the above two
2085          * gotos for reasons why we could possibly end up here.
2086          */
2087         WARN_ON(1);
2088
2089         return 0;
2090 }
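/*
 * Worked example (hypothetical held-lock stack, most recent last):
 *
 *	lock(L);  trylock(T1);  trylock(T2);   then acquiring <next>
 *
 * adds the dependencies T2 -> next, T1 -> next and L -> next, then stops
 * at L because L is the first non-trylock entry walking backwards.
 */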
2091
2092 unsigned long nr_lock_chains;
2093 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
2094 int nr_chain_hlocks;
2095 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
2096
2097 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
2098 {
2099         return lock_classes + chain_hlocks[chain->base + i];
2100 }
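/*
 * chain_hlocks[] is a flat array of lock_class indices; each lock_chain
 * owns the slice [chain->base, chain->base + chain->depth) of it, which
 * is what the helper above indexes into.
 */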
2101
2102 /*
2103  * Returns the index of the first held_lock of the current chain
2104  */
2105 static inline int get_first_held_lock(struct task_struct *curr,
2106                                         struct held_lock *hlock)
2107 {
2108         int i;
2109         struct held_lock *hlock_curr;
2110
2111         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2112                 hlock_curr = curr->held_locks + i;
2113                 if (hlock_curr->irq_context != hlock->irq_context)
2114                         break;
2115
2116         }
2117
2118         return ++i;
2119 }
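/*
 * Example (hypothetical stack): with held locks A, B taken in process
 * context and C taken from a hardirq, get_first_held_lock() for a
 * hardirq-context hlock returns the index of C - i.e. the start of the
 * contiguous run of held locks that share hlock's irq_context.
 */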
2120
2121 #ifdef CONFIG_DEBUG_LOCKDEP
2122 /*
2123  * Returns the next chain_key iteration
2124  */
2125 static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
2126 {
2127         u64 new_chain_key = iterate_chain_key(chain_key, class_idx);
2128
2129         printk(" class_idx:%d -> chain_key:%016Lx",
2130                 class_idx,
2131                 (unsigned long long)new_chain_key);
2132         return new_chain_key;
2133 }
2134
2135 static void
2136 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next)
2137 {
2138         struct held_lock *hlock;
2139         u64 chain_key = 0;
2140         int depth = curr->lockdep_depth;
2141         int i;
2142
2143         printk("depth: %u\n", depth + 1);
2144         for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
2145                 hlock = curr->held_locks + i;
2146                 chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2147
2148                 print_lock(hlock);
2149         }
2150
2151         print_chain_key_iteration(hlock_next->class_idx, chain_key);
2152         print_lock(hlock_next);
2153 }
2154
2155 static void print_chain_keys_chain(struct lock_chain *chain)
2156 {
2157         int i;
2158         u64 chain_key = 0;
2159         int class_id;
2160
2161         printk("depth: %u\n", chain->depth);
2162         for (i = 0; i < chain->depth; i++) {
2163                 class_id = chain_hlocks[chain->base + i];
2164                 chain_key = print_chain_key_iteration(class_id + 1, chain_key);
2165
2166                 print_lock_name(lock_classes + class_id);
2167                 printk("\n");
2168         }
2169 }
2170
2171 static void print_collision(struct task_struct *curr,
2172                         struct held_lock *hlock_next,
2173                         struct lock_chain *chain)
2174 {
2175         pr_warn("\n");
2176         pr_warn("============================\n");
2177         pr_warn("WARNING: chain_key collision\n");
2178         print_kernel_ident();
2179         pr_warn("----------------------------\n");
2180         pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
2181         pr_warn("Hash chain already cached but the contents don't match!\n");
2182
2183         pr_warn("Held locks:");
2184         print_chain_keys_held_locks(curr, hlock_next);
2185
2186         pr_warn("Locks in cached chain:");
2187         print_chain_keys_chain(chain);
2188
2189         pr_warn("\nstack backtrace:\n");
2190         dump_stack();
2191 }
2192 #endif
2193
2194 /*
2195  * Checks whether the chain and the current held locks are consistent
2196  * in depth and also in content. If they are not, it most likely means
2197  * that there was a collision during the calculation of the chain_key.
2198  * Returns: 0 on mismatch (most likely a chain_key collision), 1 if consistent
2199  */
2200 static int check_no_collision(struct task_struct *curr,
2201                         struct held_lock *hlock,
2202                         struct lock_chain *chain)
2203 {
2204 #ifdef CONFIG_DEBUG_LOCKDEP
2205         int i, j, id;
2206
2207         i = get_first_held_lock(curr, hlock);
2208
2209         if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
2210                 print_collision(curr, hlock, chain);
2211                 return 0;
2212         }
2213
2214         for (j = 0; j < chain->depth - 1; j++, i++) {
2215                 id = curr->held_locks[i].class_idx - 1;
2216
2217                 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
2218                         print_collision(curr, hlock, chain);
2219                         return 0;
2220                 }
2221         }
2222 #endif
2223         return 1;
2224 }
2225
2226 /*
2227  * Adds a dependency chain into the chain hashtable. Must be called with
2228  * graph_lock held.
2229  *
2230  * Returns 0 on failure, with graph_lock released.
2231  * Returns 1 on success, with graph_lock still held.
2232  */
2233 static inline int add_chain_cache(struct task_struct *curr,
2234                                   struct held_lock *hlock,
2235                                   u64 chain_key)
2236 {
2237         struct lock_class *class = hlock_class(hlock);
2238         struct hlist_head *hash_head = chainhashentry(chain_key);
2239         struct lock_chain *chain;
2240         int i, j;
2241
2242         /*
2243          * Allocate a new chain entry from the static array, and add
2244          * it to the hash:
2245          */
2246
2247         /*
2248          * We might need to take the graph lock, ensure we've got IRQs
2249          * disabled to make this an IRQ-safe lock.. for recursion reasons
2250          * lockdep won't complain about its own locking errors.
2251          */
2252         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2253                 return 0;
2254
2255         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2256                 if (!debug_locks_off_graph_unlock())
2257                         return 0;
2258
2259                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
2260                 dump_stack();
2261                 return 0;
2262         }
2263         chain = lock_chains + nr_lock_chains++;
2264         chain->chain_key = chain_key;
2265         chain->irq_context = hlock->irq_context;
2266         i = get_first_held_lock(curr, hlock);
2267         chain->depth = curr->lockdep_depth + 1 - i;
2268
2269         BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
2270         BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
2271         BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
2272
2273         if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2274                 chain->base = nr_chain_hlocks;
2275                 for (j = 0; j < chain->depth - 1; j++, i++) {
2276                         int lock_id = curr->held_locks[i].class_idx - 1;
2277                         chain_hlocks[chain->base + j] = lock_id;
2278                 }
2279                 chain_hlocks[chain->base + j] = class - lock_classes;
2280                 nr_chain_hlocks += chain->depth;
2281         } else {
2282                 if (!debug_locks_off_graph_unlock())
2283                         return 0;
2284
2285                 print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
2286                 dump_stack();
2287                 return 0;
2288         }
2289
2290         hlist_add_head_rcu(&chain->entry, hash_head);
2291         debug_atomic_inc(chain_lookup_misses);
2292         inc_chains();
2293
2294         return 1;
2295 }
2296
2297 /*
2298  * Look up a dependency chain. Must be called with either the graph lock or
2299  * the RCU read lock held.
2300  */
2301 static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
2302 {
2303         struct hlist_head *hash_head = chainhashentry(chain_key);
2304         struct lock_chain *chain;
2305
2306         hlist_for_each_entry_rcu(chain, hash_head, entry) {
2307                 if (READ_ONCE(chain->chain_key) == chain_key) {
2308                         debug_atomic_inc(chain_lookup_hits);
2309                         return chain;
2310                 }
2311         }
2312         return NULL;
2313 }
2314
2315 /*
2316  * If the key is not present yet in the dependency chain cache then
2317  * add it and return 1 - in this case the new dependency chain is
2318  * validated by the caller. If the key is already hashed, return 0.
2319  * (On a return value of 1, graph_lock is held.)
2320  */
2321 static inline int lookup_chain_cache_add(struct task_struct *curr,
2322                                          struct held_lock *hlock,
2323                                          u64 chain_key)
2324 {
2325         struct lock_class *class = hlock_class(hlock);
2326         struct lock_chain *chain = lookup_chain_cache(chain_key);
2327
2328         if (chain) {
2329 cache_hit:
2330                 if (!check_no_collision(curr, hlock, chain))
2331                         return 0;
2332
2333                 if (very_verbose(class)) {
2334                         printk("\nhash chain already cached, key: "
2335                                         "%016Lx tail class: [%px] %s\n",
2336                                         (unsigned long long)chain_key,
2337                                         class->key, class->name);
2338                 }
2339
2340                 return 0;
2341         }
2342
2343         if (very_verbose(class)) {
2344                 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
2345                         (unsigned long long)chain_key, class->key, class->name);
2346         }
2347
2348         if (!graph_lock())
2349                 return 0;
2350
2351         /*
2352          * We have to walk the chain again locked - to avoid duplicates:
2353          */
2354         chain = lookup_chain_cache(chain_key);
2355         if (chain) {
2356                 graph_unlock();
2357                 goto cache_hit;
2358         }
2359
2360         if (!add_chain_cache(curr, hlock, chain_key))
2361                 return 0;
2362
2363         return 1;
2364 }
2365
2366 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2367                 struct held_lock *hlock, int chain_head, u64 chain_key)
2368 {
2369         /*
2370          * Trylock needs to maintain the stack of held locks, but it
2371          * does not add new dependencies, because trylock can be done
2372          * in any order.
2373          *
2374          * We look up the chain_key and do the O(N^2) check and update of
2375          * the dependencies only if this is a new dependency chain.
2376          * (If lookup_chain_cache_add() returns 1, it acquires
2377          * graph_lock for us)
2378          */
2379         if (!hlock->trylock && hlock->check &&
2380             lookup_chain_cache_add(curr, hlock, chain_key)) {
2381                 /*
2382                  * Check whether last held lock:
2383                  *
2384                  * - is irq-safe, if this lock is irq-unsafe
2385                  * - is softirq-safe, if this lock is hardirq-unsafe
2386                  *
2387                  * And check whether the new lock's dependency graph
2388                  * could lead back to the previous lock.
2389                  *
2390                  * Any of these scenarios could lead to a deadlock. If
2391                  * all validations pass, the new dependencies get added.
2392                  */
2393                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2394
2395                 if (!ret)
2396                         return 0;
2397                 /*
2398                  * Mark recursive read, as we jump over it when
2399                  * building dependencies (just like we jump over
2400                  * trylock entries):
2401                  */
2402                 if (ret == 2)
2403                         hlock->read = 2;
2404                 /*
2405                  * Add dependency only if this lock is not the head
2406                  * of the chain, and if it's not a secondary read-lock:
2407                  */
2408                 if (!chain_head && ret != 2) {
2409                         if (!check_prevs_add(curr, hlock))
2410                                 return 0;
2411                 }
2412
2413                 graph_unlock();
2414         } else {
2415                 /* after lookup_chain_cache_add(): */
2416                 if (unlikely(!debug_locks))
2417                         return 0;
2418         }
2419
2420         return 1;
2421 }
2422 #else
2423 static inline int validate_chain(struct task_struct *curr,
2424                 struct lockdep_map *lock, struct held_lock *hlock,
2425                 int chain_head, u64 chain_key)
2426 {
2427         return 1;
2428 }
2429 #endif
2430
2431 /*
2432  * We are building curr_chain_key incrementally, so double-check
2433  * it from scratch, to make sure that it's done correctly:
2434  */
2435 static void check_chain_key(struct task_struct *curr)
2436 {
2437 #ifdef CONFIG_DEBUG_LOCKDEP
2438         struct held_lock *hlock, *prev_hlock = NULL;
2439         unsigned int i;
2440         u64 chain_key = 0;
2441
2442         for (i = 0; i < curr->lockdep_depth; i++) {
2443                 hlock = curr->held_locks + i;
2444                 if (chain_key != hlock->prev_chain_key) {
2445                         debug_locks_off();
2446                         /*
2447                          * We got mighty confused, our chain keys don't match
2448                          * with what we expect - did someone trample on our task state?
2449                          */
2450                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2451                                 curr->lockdep_depth, i,
2452                                 (unsigned long long)chain_key,
2453                                 (unsigned long long)hlock->prev_chain_key);
2454                         return;
2455                 }
2456                 /*
2457                  * Whoops ran out of static storage again?
2458                  */
2459                 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2460                         return;
2461
2462                 if (prev_hlock && (prev_hlock->irq_context !=
2463                                                         hlock->irq_context))
2464                         chain_key = 0;
2465                 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2466                 prev_hlock = hlock;
2467         }
2468         if (chain_key != curr->curr_chain_key) {
2469                 debug_locks_off();
2470                 /*
2471                  * More smoking hash instead of calculating it, damn see these
2472                  * numbers float.. I bet that a pink elephant stepped on my memory.
2473                  */
2474                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2475                         curr->lockdep_depth, i,
2476                         (unsigned long long)chain_key,
2477                         (unsigned long long)curr->curr_chain_key);
2478         }
2479 #endif
2480 }
2481
2482 static void
2483 print_usage_bug_scenario(struct held_lock *lock)
2484 {
2485         struct lock_class *class = hlock_class(lock);
2486
2487         printk(" Possible unsafe locking scenario:\n\n");
2488         printk("       CPU0\n");
2489         printk("       ----\n");
2490         printk("  lock(");
2491         __print_lock_name(class);
2492         printk(KERN_CONT ");\n");
2493         printk("  <Interrupt>\n");
2494         printk("    lock(");
2495         __print_lock_name(class);
2496         printk(KERN_CONT ");\n");
2497         printk("\n *** DEADLOCK ***\n\n");
2498 }
2499
2500 static int
2501 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2502                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2503 {
2504         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2505                 return 0;
2506
2507         pr_warn("\n");
2508         pr_warn("================================\n");
2509         pr_warn("WARNING: inconsistent lock state\n");
2510         print_kernel_ident();
2511         pr_warn("--------------------------------\n");
2512
2513         pr_warn("inconsistent {%s} -> {%s} usage.\n",
2514                 usage_str[prev_bit], usage_str[new_bit]);
2515
2516         pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2517                 curr->comm, task_pid_nr(curr),
2518                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2519                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2520                 trace_hardirqs_enabled(curr),
2521                 trace_softirqs_enabled(curr));
2522         print_lock(this);
2523
2524         pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
2525         print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2526
2527         print_irqtrace_events(curr);
2528         pr_warn("\nother info that might help us debug this:\n");
2529         print_usage_bug_scenario(this);
2530
2531         lockdep_print_held_locks(curr);
2532
2533         pr_warn("\nstack backtrace:\n");
2534         dump_stack();
2535
2536         return 0;
2537 }
2538
2539 /*
2540  * Print out an error if an invalid bit is set:
2541  */
2542 static inline int
2543 valid_state(struct task_struct *curr, struct held_lock *this,
2544             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2545 {
2546         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2547                 return print_usage_bug(curr, this, bad_bit, new_bit);
2548         return 1;
2549 }
2550
2551 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2552                      enum lock_usage_bit new_bit);
2553
2554 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2555
2556 /*
2557  * print irq inversion bug:
2558  */
2559 static int
2560 print_irq_inversion_bug(struct task_struct *curr,
2561                         struct lock_list *root, struct lock_list *other,
2562                         struct held_lock *this, int forwards,
2563                         const char *irqclass)
2564 {
2565         struct lock_list *entry = other;
2566         struct lock_list *middle = NULL;
2567         int depth;
2568
2569         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2570                 return 0;
2571
2572         pr_warn("\n");
2573         pr_warn("========================================================\n");
2574         pr_warn("WARNING: possible irq lock inversion dependency detected\n");
2575         print_kernel_ident();
2576         pr_warn("--------------------------------------------------------\n");
2577         pr_warn("%s/%d just changed the state of lock:\n",
2578                 curr->comm, task_pid_nr(curr));
2579         print_lock(this);
2580         if (forwards)
2581                 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2582         else
2583                 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2584         print_lock_name(other->class);
2585         pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2586
2587         pr_warn("\nother info that might help us debug this:\n");
2588
2589         /* Find a middle lock (if one exists) */
2590         depth = get_lock_depth(other);
2591         do {
2592                 if (depth == 0 && (entry != root)) {
2593                         pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
2594                         break;
2595                 }
2596                 middle = entry;
2597                 entry = get_lock_parent(entry);
2598                 depth--;
2599         } while (entry && entry != root && (depth >= 0));
2600         if (forwards)
2601                 print_irq_lock_scenario(root, other,
2602                         middle ? middle->class : root->class, other->class);
2603         else
2604                 print_irq_lock_scenario(other, root,
2605                         middle ? middle->class : other->class, root->class);
2606
2607         lockdep_print_held_locks(curr);
2608
2609         pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2610         if (!save_trace(&root->trace))
2611                 return 0;
2612         print_shortest_lock_dependencies(other, root);
2613
2614         pr_warn("\nstack backtrace:\n");
2615         dump_stack();
2616
2617         return 0;
2618 }
2619
2620 /*
2621  * Prove that in the forwards-direction subgraph starting at <this>
2622  * there is no lock matching <mask>:
2623  */
2624 static int
2625 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2626                      enum lock_usage_bit bit, const char *irqclass)
2627 {
2628         int ret;
2629         struct lock_list root;
2630         struct lock_list *uninitialized_var(target_entry);
2631
2632         root.parent = NULL;
2633         root.class = hlock_class(this);
2634         ret = find_usage_forwards(&root, bit, &target_entry);
2635         if (ret < 0)
2636                 return print_bfs_bug(ret);
2637         if (ret == 1)
2638                 return ret;
2639
2640         return print_irq_inversion_bug(curr, &root, target_entry,
2641                                         this, 1, irqclass);
2642 }
2643
2644 /*
2645  * Prove that in the backwards-direction subgraph starting at <this>
2646  * there is no lock matching <mask>:
2647  */
2648 static int
2649 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2650                       enum lock_usage_bit bit, const char *irqclass)
2651 {
2652         int ret;
2653         struct lock_list root;
2654         struct lock_list *uninitialized_var(target_entry);
2655
2656         root.parent = NULL;
2657         root.class = hlock_class(this);
2658         ret = find_usage_backwards(&root, bit, &target_entry);
2659         if (ret < 0)
2660                 return print_bfs_bug(ret);
2661         if (ret == 1)
2662                 return ret;
2663
2664         return print_irq_inversion_bug(curr, &root, target_entry,
2665                                         this, 0, irqclass);
2666 }
2667
2668 void print_irqtrace_events(struct task_struct *curr)
2669 {
2670         printk("irq event stamp: %u\n", curr->irq_events);
2671         printk("hardirqs last  enabled at (%u): [<%px>] %pS\n",
2672                 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
2673                 (void *)curr->hardirq_enable_ip);
2674         printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
2675                 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
2676                 (void *)curr->hardirq_disable_ip);
2677         printk("softirqs last  enabled at (%u): [<%px>] %pS\n",
2678                 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
2679                 (void *)curr->softirq_enable_ip);
2680         printk("softirqs last disabled at (%u): [<%px>] %pS\n",
2681                 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
2682                 (void *)curr->softirq_disable_ip);
2683 }
2684
2685 static int HARDIRQ_verbose(struct lock_class *class)
2686 {
2687 #if HARDIRQ_VERBOSE
2688         return class_filter(class);
2689 #endif
2690         return 0;
2691 }
2692
2693 static int SOFTIRQ_verbose(struct lock_class *class)
2694 {
2695 #if SOFTIRQ_VERBOSE
2696         return class_filter(class);
2697 #endif
2698         return 0;
2699 }
2700
2701 #define STRICT_READ_CHECKS      1
2702
2703 static int (*state_verbose_f[])(struct lock_class *class) = {
2704 #define LOCKDEP_STATE(__STATE) \
2705         __STATE##_verbose,
2706 #include "lockdep_states.h"
2707 #undef LOCKDEP_STATE
2708 };
2709
2710 static inline int state_verbose(enum lock_usage_bit bit,
2711                                 struct lock_class *class)
2712 {
2713         return state_verbose_f[bit >> 2](class);
2714 }
2715
2716 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2717                              enum lock_usage_bit bit, const char *name);
2718
2719 static int
2720 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2721                 enum lock_usage_bit new_bit)
2722 {
2723         int excl_bit = exclusive_bit(new_bit);
2724         int read = new_bit & LOCK_USAGE_READ_MASK;
2725         int dir = new_bit & LOCK_USAGE_DIR_MASK;
2726
2727         /*
2728          * mark USED_IN has to look forwards -- to ensure no dependency
2729          * has ENABLED state, which would allow recursion deadlocks.
2730          *
2731          * mark ENABLED has to look backwards -- to ensure no dependee
2732          * has USED_IN state, which, again, would allow recursion deadlocks.
2733          */
2734         check_usage_f usage = dir ?
2735                 check_usage_backwards : check_usage_forwards;
2736
2737         /*
2738          * Validate that this particular lock does not have conflicting
2739          * usage states.
2740          */
2741         if (!valid_state(curr, this, new_bit, excl_bit))
2742                 return 0;
2743
2744         /*
2745          * Validate that the lock dependencies don't have conflicting usage
2746          * states.
2747          */
2748         if ((!read || !dir || STRICT_READ_CHECKS) &&
2749                         !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
2750                 return 0;
2751
2752         /*
2753          * Check for read in write conflicts
2754          */
2755         if (!read) {
2756                 if (!valid_state(curr, this, new_bit, excl_bit + LOCK_USAGE_READ_MASK))
2757                         return 0;
2758
2759                 if (STRICT_READ_CHECKS &&
2760                         !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
2761                                 state_name(new_bit + LOCK_USAGE_READ_MASK)))
2762                         return 0;
2763         }
2764
2765         if (state_verbose(new_bit, hlock_class(this)))
2766                 return 2;
2767
2768         return 1;
2769 }
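/*
 * Concrete reading of the checks above (illustrative): marking a class
 * LOCK_USED_IN_HARDIRQ (it was taken from a hardirq, i.e. it is
 * hardirq-safe) must prove that no lock reachable forwards from it is
 * already LOCK_ENABLED_HARDIRQ (hardirq-unsafe); marking it
 * LOCK_ENABLED_HARDIRQ must prove the inverse in the backwards direction.
 */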
2770
2771 /*
2772  * Mark all held locks with a usage bit:
2773  */
2774 static int
2775 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit)
2776 {
2777         struct held_lock *hlock;
2778         int i;
2779
2780         for (i = 0; i < curr->lockdep_depth; i++) {
2781                 enum lock_usage_bit hlock_bit = base_bit;
2782                 hlock = curr->held_locks + i;
2783
2784                 if (hlock->read)
2785                         hlock_bit += LOCK_USAGE_READ_MASK;
2786
2787                 BUG_ON(hlock_bit >= LOCK_USAGE_STATES);
2788
2789                 if (!hlock->check)
2790                         continue;
2791
2792                 if (!mark_lock(curr, hlock, hlock_bit))
2793                         return 0;
2794         }
2795
2796         return 1;
2797 }
2798
2799 /*
2800  * Hardirqs will be enabled:
2801  */
2802 static void __trace_hardirqs_on_caller(unsigned long ip)
2803 {
2804         struct task_struct *curr = current;
2805
2806         /* we'll do an OFF -> ON transition: */
2807         curr->hardirqs_enabled = 1;
2808
2809         /*
2810          * We are going to turn hardirqs on, so set the
2811          * usage bit for all held locks:
2812          */
2813         if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ))
2814                 return;
2815         /*
2816          * If we have softirqs enabled, then set the usage
2817          * bit for all held locks. (disabled hardirqs prevented
2818          * this bit from being set before)
2819          */
2820         if (curr->softirqs_enabled)
2821                 if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
2822                         return;
2823
2824         curr->hardirq_enable_ip = ip;
2825         curr->hardirq_enable_event = ++curr->irq_events;
2826         debug_atomic_inc(hardirqs_on_events);
2827 }
2828
2829 void lockdep_hardirqs_on(unsigned long ip)
2830 {
2831         if (unlikely(!debug_locks || current->lockdep_recursion))
2832                 return;
2833
2834         if (unlikely(current->hardirqs_enabled)) {
2835                 /*
2836                  * Neither irq nor preemption are disabled here
2837                  * so this is racy by nature but losing one hit
2838                  * in a stat is not a big deal.
2839                  */
2840                 __debug_atomic_inc(redundant_hardirqs_on);
2841                 return;
2842         }
2843
2844         /*
2845          * We're enabling irqs and according to our state above irqs weren't
2846          * already enabled, yet we find the hardware thinks they are in fact
2847          * enabled.. someone messed up their IRQ state tracing.
2848          */
2849         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2850                 return;
2851
2852         /*
2853          * See the fine text that goes along with this variable definition.
2854          */
2855         if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2856                 return;
2857
2858         /*
2859          * Can't allow enabling interrupts while in an interrupt handler,
2860          * that's general bad form and such. Recursion, limited stack etc..
2861          */
2862         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2863                 return;
2864
2865         current->lockdep_recursion = 1;
2866         __trace_hardirqs_on_caller(ip);
2867         current->lockdep_recursion = 0;
2868 }
2869
2870 /*
2871  * Hardirqs were disabled:
2872  */
2873 void lockdep_hardirqs_off(unsigned long ip)
2874 {
2875         struct task_struct *curr = current;
2876
2877         if (unlikely(!debug_locks || current->lockdep_recursion))
2878                 return;
2879
2880         /*
2881          * So we're supposed to get called after you mask local IRQs, but for
2882          * some reason the hardware doesn't quite think you did a proper job.
2883          */
2884         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2885                 return;
2886
2887         if (curr->hardirqs_enabled) {
2888                 /*
2889                  * We have done an ON -> OFF transition:
2890                  */
2891                 curr->hardirqs_enabled = 0;
2892                 curr->hardirq_disable_ip = ip;
2893                 curr->hardirq_disable_event = ++curr->irq_events;
2894                 debug_atomic_inc(hardirqs_off_events);
2895         } else
2896                 debug_atomic_inc(redundant_hardirqs_off);
2897 }
2898
2899 /*
2900  * Softirqs will be enabled:
2901  */
2902 void trace_softirqs_on(unsigned long ip)
2903 {
2904         struct task_struct *curr = current;
2905
2906         if (unlikely(!debug_locks || current->lockdep_recursion))
2907                 return;
2908
2909         /*
2910          * We fancy IRQs being disabled here, see softirq.c, avoids
2911          * funny state and nesting things.
2912          */
2913         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2914                 return;
2915
2916         if (curr->softirqs_enabled) {
2917                 debug_atomic_inc(redundant_softirqs_on);
2918                 return;
2919         }
2920
2921         current->lockdep_recursion = 1;
2922         /*
2923          * We'll do an OFF -> ON transition:
2924          */
2925         curr->softirqs_enabled = 1;
2926         curr->softirq_enable_ip = ip;
2927         curr->softirq_enable_event = ++curr->irq_events;
2928         debug_atomic_inc(softirqs_on_events);
2929         /*
2930          * We are going to turn softirqs on, so set the
2931          * usage bit for all held locks, if hardirqs are
2932          * enabled too:
2933          */
2934         if (curr->hardirqs_enabled)
2935                 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
2936         current->lockdep_recursion = 0;
2937 }
2938
2939 /*
2940  * Softirqs were disabled:
2941  */
2942 void trace_softirqs_off(unsigned long ip)
2943 {
2944         struct task_struct *curr = current;
2945
2946         if (unlikely(!debug_locks || current->lockdep_recursion))
2947                 return;
2948
2949         /*
2950          * We fancy IRQs being disabled here, see softirq.c
2951          */
2952         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2953                 return;
2954
2955         if (curr->softirqs_enabled) {
2956                 /*
2957                  * We have done an ON -> OFF transition:
2958                  */
2959                 curr->softirqs_enabled = 0;
2960                 curr->softirq_disable_ip = ip;
2961                 curr->softirq_disable_event = ++curr->irq_events;
2962                 debug_atomic_inc(softirqs_off_events);
2963                 /*
2964                  * Whoops, we wanted softirqs off, so why aren't they?
2965                  */
2966                 DEBUG_LOCKS_WARN_ON(!softirq_count());
2967         } else
2968                 debug_atomic_inc(redundant_softirqs_off);
2969 }
2970
2971 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2972 {
2973         /*
2974          * If non-trylock use in a hardirq or softirq context, then
2975          * mark the lock as used in these contexts:
2976          */
2977         if (!hlock->trylock) {
2978                 if (hlock->read) {
2979                         if (curr->hardirq_context)
2980                                 if (!mark_lock(curr, hlock,
2981                                                 LOCK_USED_IN_HARDIRQ_READ))
2982                                         return 0;
2983                         if (curr->softirq_context)
2984                                 if (!mark_lock(curr, hlock,
2985                                                 LOCK_USED_IN_SOFTIRQ_READ))
2986                                         return 0;
2987                 } else {
2988                         if (curr->hardirq_context)
2989                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2990                                         return 0;
2991                         if (curr->softirq_context)
2992                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2993                                         return 0;
2994                 }
2995         }
2996         if (!hlock->hardirqs_off) {
2997                 if (hlock->read) {
2998                         if (!mark_lock(curr, hlock,
2999                                         LOCK_ENABLED_HARDIRQ_READ))
3000                                 return 0;
3001                         if (curr->softirqs_enabled)
3002                                 if (!mark_lock(curr, hlock,
3003                                                 LOCK_ENABLED_SOFTIRQ_READ))
3004                                         return 0;
3005                 } else {
3006                         if (!mark_lock(curr, hlock,
3007                                         LOCK_ENABLED_HARDIRQ))
3008                                 return 0;
3009                         if (curr->softirqs_enabled)
3010                                 if (!mark_lock(curr, hlock,
3011                                                 LOCK_ENABLED_SOFTIRQ))
3012                                         return 0;
3013                 }
3014         }
3015
3016         return 1;
3017 }
3018
3019 static inline unsigned int task_irq_context(struct task_struct *task)
3020 {
3021         return 2 * !!task->hardirq_context + !!task->softirq_context;
3022 }
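/*
 * The encoding above yields: 0 for plain process context, 1 for softirq,
 * 2 for hardirq and 3 for a hardirq that interrupted a softirq.
 */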
3023
3024 static int separate_irq_context(struct task_struct *curr,
3025                 struct held_lock *hlock)
3026 {
3027         unsigned int depth = curr->lockdep_depth;
3028
3029         /*
3030          * Keep track of points where we cross into an interrupt context:
3031          */
3032         if (depth) {
3033                 struct held_lock *prev_hlock;
3034
3035                 prev_hlock = curr->held_locks + depth-1;
3036                 /*
3037                  * If we cross into another context, reset the
3038                  * hash key (this also prevents the checking and the
3039                  * adding of the dependency to 'prev'):
3040                  */
3041                 if (prev_hlock->irq_context != hlock->irq_context)
3042                         return 1;
3043         }
3044         return 0;
3045 }
3046
3047 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3048
3049 static inline
3050 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
3051                 enum lock_usage_bit new_bit)
3052 {
3053         WARN_ON(1); /* Impossible, innit? We don't have CONFIG_TRACE_IRQFLAGS */
3054         return 1;
3055 }
3056
3057 static inline int mark_irqflags(struct task_struct *curr,
3058                 struct held_lock *hlock)
3059 {
3060         return 1;
3061 }
3062
3063 static inline unsigned int task_irq_context(struct task_struct *task)
3064 {
3065         return 0;
3066 }
3067
3068 static inline int separate_irq_context(struct task_struct *curr,
3069                 struct held_lock *hlock)
3070 {
3071         return 0;
3072 }
3073
3074 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
3075
3076 /*
3077  * Mark a lock with a usage bit, and validate the state transition:
3078  */
3079 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3080                              enum lock_usage_bit new_bit)
3081 {
3082         unsigned int new_mask = 1 << new_bit, ret = 1;
3083
3084         /*
3085          * If already set then do not dirty the cacheline,
3086          * nor do any checks:
3087          */
3088         if (likely(hlock_class(this)->usage_mask & new_mask))
3089                 return 1;
3090
3091         if (!graph_lock())
3092                 return 0;
3093         /*
3094          * Make sure we didn't race:
3095          */
3096         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
3097                 graph_unlock();
3098                 return 1;
3099         }
3100
3101         hlock_class(this)->usage_mask |= new_mask;
3102
3103         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
3104                 return 0;
3105
3106         switch (new_bit) {
3107 #define LOCKDEP_STATE(__STATE)                  \
3108         case LOCK_USED_IN_##__STATE:            \
3109         case LOCK_USED_IN_##__STATE##_READ:     \
3110         case LOCK_ENABLED_##__STATE:            \
3111         case LOCK_ENABLED_##__STATE##_READ:
3112 #include "lockdep_states.h"
3113 #undef LOCKDEP_STATE
3114                 ret = mark_lock_irq(curr, this, new_bit);
3115                 if (!ret)
3116                         return 0;
3117                 break;
3118         case LOCK_USED:
3119                 debug_atomic_dec(nr_unused_locks);
3120                 break;
3121         default:
3122                 if (!debug_locks_off_graph_unlock())
3123                         return 0;
3124                 WARN_ON(1);
3125                 return 0;
3126         }
3127
3128         graph_unlock();
3129
3130         /*
3131          * We must printk outside of the graph_lock:
3132          */
3133         if (ret == 2) {
3134                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
3135                 print_lock(this);
3136                 print_irqtrace_events(curr);
3137                 dump_stack();
3138         }
3139
3140         return ret;
3141 }
3142
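     /*
      * A sketch of what the LOCKDEP_STATE() switch above expands to: for
      * each state in lockdep_states.h (e.g. HARDIRQ) the #include generates
      *
      *     case LOCK_USED_IN_HARDIRQ:
      *     case LOCK_USED_IN_HARDIRQ_READ:
      *     case LOCK_ENABLED_HARDIRQ:
      *     case LOCK_ENABLED_HARDIRQ_READ:
      *             ret = mark_lock_irq(curr, this, new_bit);
      *
      * i.e. every irq-related usage bit is funnelled into mark_lock_irq(),
      * while LOCK_USED only updates the unused-locks statistics.
      */
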
3143 /*
3144  * Initialize a lock instance's lock-class mapping info:
3145  */
3146 void lockdep_init_map(struct lockdep_map *lock, const char *name,
3147                       struct lock_class_key *key, int subclass)
3148 {
3149         int i;
3150
3151         for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
3152                 lock->class_cache[i] = NULL;
3153
3154 #ifdef CONFIG_LOCK_STAT
3155         lock->cpu = raw_smp_processor_id();
3156 #endif
3157
3158         /*
3159          * Can't be having no nameless bastards around this place!
3160          */
3161         if (DEBUG_LOCKS_WARN_ON(!name)) {
3162                 lock->name = "NULL";
3163                 return;
3164         }
3165
3166         lock->name = name;
3167
3168         /*
3169          * No key, no joy, we need to hash something.
3170          */
3171         if (DEBUG_LOCKS_WARN_ON(!key))
3172                 return;
3173         /*
3174          * Sanity check, the lock-class key must be persistent:
3175          */
3176         if (!static_obj(key)) {
3177                 printk("BUG: key %px not in .data!\n", key);
3178                 /*
3179                  * What it says above ^^^^^, I suggest you read it.
3180                  */
3181                 DEBUG_LOCKS_WARN_ON(1);
3182                 return;
3183         }
3184         lock->key = key;
3185
3186         if (unlikely(!debug_locks))
3187                 return;
3188
3189         if (subclass) {
3190                 unsigned long flags;
3191
3192                 if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
3193                         return;
3194
3195                 raw_local_irq_save(flags);
3196                 current->lockdep_recursion = 1;
3197                 register_lock_class(lock, subclass, 1);
3198                 current->lockdep_recursion = 0;
3199                 raw_local_irq_restore(flags);
3200         }
3201 }
3202 EXPORT_SYMBOL_GPL(lockdep_init_map);
3203
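     /*
      * Usage sketch (hypothetical caller): the key must live in static
      * storage so that the static_obj() check above accepts it, e.g.:
      *
      *     static struct lock_class_key mydev_key;
      *     static struct lockdep_map mydev_dep_map;
      *
      *     lockdep_init_map(&mydev_dep_map, "mydev_lock", &mydev_key, 0);
      *
      * Helpers such as lockdep_set_class() in <linux/lockdep.h> wrap this
      * call for objects that embed their own dep_map.
      */
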
3204 struct lock_class_key __lockdep_no_validate__;
3205 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
3206
3207 static int
3208 print_lock_nested_lock_not_held(struct task_struct *curr,
3209                                 struct held_lock *hlock,
3210                                 unsigned long ip)
3211 {
3212         if (!debug_locks_off())
3213                 return 0;
3214         if (debug_locks_silent)
3215                 return 0;
3216
3217         pr_warn("\n");
3218         pr_warn("==================================\n");
3219         pr_warn("WARNING: Nested lock was not taken\n");
3220         print_kernel_ident();
3221         pr_warn("----------------------------------\n");
3222
3223         pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
3224         print_lock(hlock);
3225
3226         pr_warn("\nbut this task is not holding:\n");
3227         pr_warn("%s\n", hlock->nest_lock->name);
3228
3232         pr_warn("\nother info that might help us debug this:\n");
3233         lockdep_print_held_locks(curr);
3234
3235         pr_warn("\nstack backtrace:\n");
3236         dump_stack();
3237
3238         return 0;
3239 }
3240
3241 static int __lock_is_held(const struct lockdep_map *lock, int read);
3242
3243 /*
3244  * This gets called for every mutex_lock*()/spin_lock*() operation.
3245  * We maintain the dependency maps and validate the locking attempt:
3246  *
3247  * The callers must make sure that IRQs are disabled before calling it,
3248  * otherwise we could get an interrupt which would want to take locks,
3249  * which would end up in lockdep again.
3250  */
3251 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3252                           int trylock, int read, int check, int hardirqs_off,
3253                           struct lockdep_map *nest_lock, unsigned long ip,
3254                           int references, int pin_count)
3255 {
3256         struct task_struct *curr = current;
3257         struct lock_class *class = NULL;
3258         struct held_lock *hlock;
3259         unsigned int depth;
3260         int chain_head = 0;
3261         int class_idx;
3262         u64 chain_key;
3263
3264         if (unlikely(!debug_locks))
3265                 return 0;
3266
3267         if (!prove_locking || lock->key == &__lockdep_no_validate__)
3268                 check = 0;
3269
3270         if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3271                 class = lock->class_cache[subclass];
3272         /*
3273          * Not cached?
3274          */
3275         if (unlikely(!class)) {
3276                 class = register_lock_class(lock, subclass, 0);
3277                 if (!class)
3278                         return 0;
3279         }
3280
3281         debug_class_ops_inc(class);
3282
3283         if (very_verbose(class)) {
3284                 printk("\nacquire class [%px] %s", class->key, class->name);
3285                 if (class->name_version > 1)
3286                         printk(KERN_CONT "#%d", class->name_version);
3287                 printk(KERN_CONT "\n");
3288                 dump_stack();
3289         }
3290
3291         /*
3292          * Add the lock to the list of currently held locks.
3293          * (we dont increase the depth just yet, up until the
3294          * dependency checks are done)
3295          */
3296         depth = curr->lockdep_depth;
3297         /*
3298          * Ran out of static storage for our per-task lock stack again have we?
3299          */
3300         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3301                 return 0;
3302
3303         class_idx = class - lock_classes + 1;
3304
3305         if (depth) {
3306                 hlock = curr->held_locks + depth - 1;
3307                 if (hlock->class_idx == class_idx && nest_lock) {
3308                         if (hlock->references) {
3309                                 /*
3310                                  * Check: unsigned int references:12, overflow.
3311                                  */
3312                                 if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
3313                                         return 0;
3314
3315                                 hlock->references++;
3316                         } else {
3317                                 hlock->references = 2;
3318                         }
3319
3320                         return 1;
3321                 }
3322         }
3323
3324         hlock = curr->held_locks + depth;
3325         /*
3326          * Plain impossible, we just registered it and checked it weren't no
3327          * NULL like.. I bet this mushroom I ate was good!
3328          */
3329         if (DEBUG_LOCKS_WARN_ON(!class))
3330                 return 0;
3331         hlock->class_idx = class_idx;
3332         hlock->acquire_ip = ip;
3333         hlock->instance = lock;
3334         hlock->nest_lock = nest_lock;
3335         hlock->irq_context = task_irq_context(curr);
3336         hlock->trylock = trylock;
3337         hlock->read = read;
3338         hlock->check = check;
3339         hlock->hardirqs_off = !!hardirqs_off;
3340         hlock->references = references;
3341 #ifdef CONFIG_LOCK_STAT
3342         hlock->waittime_stamp = 0;
3343         hlock->holdtime_stamp = lockstat_clock();
3344 #endif
3345         hlock->pin_count = pin_count;
3346
3347         if (check && !mark_irqflags(curr, hlock))
3348                 return 0;
3349
3350         /* mark it as used: */
3351         if (!mark_lock(curr, hlock, LOCK_USED))
3352                 return 0;
3353
3354         /*
3355          * Calculate the chain hash: it's the combined hash of all the
3356          * lock keys along the dependency chain. We save the hash value
3357          * at every step so that we can get the current hash easily
3358          * after unlock. The chain hash is then used to cache dependency
3359          * results.
3360          *
3361          * The 'key ID' (the class index) is the most compact key value
3362          * to drive the hash, not class->key.
3363          */
3364         /*
3365          * Whoops, we did it again.. ran straight out of our static allocation.
3366          */
3367         if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3368                 return 0;
3369
3370         chain_key = curr->curr_chain_key;
3371         if (!depth) {
3372                 /*
3373                  * How can we have a chain hash when we ain't got no keys?!
3374                  */
3375                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3376                         return 0;
3377                 chain_head = 1;
3378         }
3379
3380         hlock->prev_chain_key = chain_key;
3381         if (separate_irq_context(curr, hlock)) {
3382                 chain_key = 0;
3383                 chain_head = 1;
3384         }
3385         chain_key = iterate_chain_key(chain_key, class_idx);
3386
3387         if (nest_lock && !__lock_is_held(nest_lock, -1))
3388                 return print_lock_nested_lock_not_held(curr, hlock, ip);
3389
3390         if (!debug_locks_silent) {
3391                 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
3392                 WARN_ON_ONCE(!hlock_class(hlock)->key);
3393         }
3394
3395         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3396                 return 0;
3397
3398         curr->curr_chain_key = chain_key;
3399         curr->lockdep_depth++;
3400         check_chain_key(curr);
3401 #ifdef CONFIG_DEBUG_LOCKDEP
3402         if (unlikely(!debug_locks))
3403                 return 0;
3404 #endif
3405         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3406                 debug_locks_off();
3407                 print_lockdep_off("BUG: MAX_LOCK_DEPTH too low!");
3408                 printk(KERN_DEBUG "depth: %i  max: %lu!\n",
3409                        curr->lockdep_depth, MAX_LOCK_DEPTH);
3410
3411                 lockdep_print_held_locks(current);
3412                 debug_show_all_locks();
3413                 dump_stack();
3414
3415                 return 0;
3416         }
3417
3418         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3419                 max_lockdep_depth = curr->lockdep_depth;
3420
3421         return 1;
3422 }
3423
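     /*
      * A sketch of the nest_lock/references path above (hypothetical
      * caller): when many locks of one class are taken under a single
      * outer lock, e.g.
      *
      *     spin_lock(&outer->lock);
      *     list_for_each_entry(obj, &outer->children, node)
      *             spin_lock_nest_lock(&obj->lock, &outer->lock);
      *
      * the second and later acquisitions of that class hit the
      * hlock->references counting branch instead of growing the held-lock
      * stack, and __lock_is_held(nest_lock) checks that the outer lock is
      * really held.
      */
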
3424 static int
3425 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3426                            unsigned long ip)
3427 {
3428         if (!debug_locks_off())
3429                 return 0;
3430         if (debug_locks_silent)
3431                 return 0;
3432
3433         pr_warn("\n");
3434         pr_warn("=====================================\n");
3435         pr_warn("WARNING: bad unlock balance detected!\n");
3436         print_kernel_ident();
3437         pr_warn("-------------------------------------\n");
3438         pr_warn("%s/%d is trying to release lock (",
3439                 curr->comm, task_pid_nr(curr));
3440         print_lockdep_cache(lock);
3441         pr_cont(") at:\n");
3442         print_ip_sym(ip);
3443         pr_warn("but there are no more locks to release!\n");
3444         pr_warn("\nother info that might help us debug this:\n");
3445         lockdep_print_held_locks(curr);
3446
3447         pr_warn("\nstack backtrace:\n");
3448         dump_stack();
3449
3450         return 0;
3451 }
3452
3453 static int match_held_lock(const struct held_lock *hlock,
3454                                         const struct lockdep_map *lock)
3455 {
3456         if (hlock->instance == lock)
3457                 return 1;
3458
3459         if (hlock->references) {
3460                 const struct lock_class *class = lock->class_cache[0];
3461
3462                 if (!class)
3463                         class = look_up_lock_class(lock, 0);
3464
3465                 /*
3466                  * If look_up_lock_class() failed to find a class, we're trying
3467                  * to test if we hold a lock that has never yet been acquired.
3468                  * Clearly if the lock hasn't been acquired _ever_, we're not
3469                  * holding it either, so report failure.
3470                  */
3471                 if (!class)
3472                         return 0;
3473
3474                 /*
3475                  * References, but not a lock we're actually ref-counting?
3476                  * State got messed up, follow the sites that change ->references
3477                  * and try to make sense of it.
3478                  */
3479                 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3480                         return 0;
3481
3482                 if (hlock->class_idx == class - lock_classes + 1)
3483                         return 1;
3484         }
3485
3486         return 0;
3487 }
3488
3489 /* @depth must not be zero */
3490 static struct held_lock *find_held_lock(struct task_struct *curr,
3491                                         struct lockdep_map *lock,
3492                                         unsigned int depth, int *idx)
3493 {
3494         struct held_lock *ret, *hlock, *prev_hlock;
3495         int i;
3496
3497         i = depth - 1;
3498         hlock = curr->held_locks + i;
3499         ret = hlock;
3500         if (match_held_lock(hlock, lock))
3501                 goto out;
3502
3503         ret = NULL;
3504         for (i--, prev_hlock = hlock--;
3505              i >= 0;
3506              i--, prev_hlock = hlock--) {
3507                 /*
3508                  * We must not cross into another context:
3509                  */
3510                 if (prev_hlock->irq_context != hlock->irq_context) {
3511                         ret = NULL;
3512                         break;
3513                 }
3514                 if (match_held_lock(hlock, lock)) {
3515                         ret = hlock;
3516                         break;
3517                 }
3518         }
3519
3520 out:
3521         *idx = i;
3522         return ret;
3523 }
3524
3525 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
3526                               int idx)
3527 {
3528         struct held_lock *hlock;
3529
3530         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3531                 return 0;
3532
3533         for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
3534                 if (!__lock_acquire(hlock->instance,
3535                                     hlock_class(hlock)->subclass,
3536                                     hlock->trylock,
3537                                     hlock->read, hlock->check,
3538                                     hlock->hardirqs_off,
3539                                     hlock->nest_lock, hlock->acquire_ip,
3540                                     hlock->references, hlock->pin_count))
3541                         return 1;
3542         }
3543         return 0;
3544 }
3545
3546 static int
3547 __lock_set_class(struct lockdep_map *lock, const char *name,
3548                  struct lock_class_key *key, unsigned int subclass,
3549                  unsigned long ip)
3550 {
3551         struct task_struct *curr = current;
3552         struct held_lock *hlock;
3553         struct lock_class *class;
3554         unsigned int depth;
3555         int i;
3556
3557         if (unlikely(!debug_locks))
3558                 return 0;
3559
3560         depth = curr->lockdep_depth;
3561         /*
3562          * This function is about (re)setting the class of a held lock,
3563          * yet we're not actually holding any locks. Naughty user!
3564          */
3565         if (DEBUG_LOCKS_WARN_ON(!depth))
3566                 return 0;
3567
3568         hlock = find_held_lock(curr, lock, depth, &i);
3569         if (!hlock)
3570                 return print_unlock_imbalance_bug(curr, lock, ip);
3571
3572         lockdep_init_map(lock, name, key, 0);
3573         class = register_lock_class(lock, subclass, 0);
3574         hlock->class_idx = class - lock_classes + 1;
3575
3576         curr->lockdep_depth = i;
3577         curr->curr_chain_key = hlock->prev_chain_key;
3578
3579         if (reacquire_held_locks(curr, depth, i))
3580                 return 0;
3581
3582         /*
3583          * I took it apart and put it back together again, except now I have
3584          * these 'spare' parts.. where shall I put them.
3585          */
3586         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3587                 return 0;
3588         return 1;
3589 }
3590
3591 static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3592 {
3593         struct task_struct *curr = current;
3594         struct held_lock *hlock;
3595         unsigned int depth;
3596         int i;
3597
3598         if (unlikely(!debug_locks))
3599                 return 0;
3600
3601         depth = curr->lockdep_depth;
3602         /*
3603          * This function is about (re)setting the class of a held lock,
3604          * yet we're not actually holding any locks. Naughty user!
3605          */
3606         if (DEBUG_LOCKS_WARN_ON(!depth))
3607                 return 0;
3608
3609         hlock = find_held_lock(curr, lock, depth, &i);
3610         if (!hlock)
3611                 return print_unlock_imbalance_bug(curr, lock, ip);
3612
3613         curr->lockdep_depth = i;
3614         curr->curr_chain_key = hlock->prev_chain_key;
3615
3616         WARN(hlock->read, "downgrading a read lock");
3617         hlock->read = 1;
3618         hlock->acquire_ip = ip;
3619
3620         if (reacquire_held_locks(curr, depth, i))
3621                 return 0;
3622
3623         /*
3624          * I took it apart and put it back together again, except now I have
3625          * these 'spare' parts.. where shall I put them.
3626          */
3627         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3628                 return 0;
3629         return 1;
3630 }
3631
3632 /*
3633  * Remove the lock to the list of currently held locks - this gets
3634  * called on mutex_unlock()/spin_unlock*() (or on a failed
3635  * mutex_lock_interruptible()).
3636  *
3637  * @nested is an hysterical artifact, needs a tree wide cleanup.
3638  */
3639 static int
3640 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3641 {
3642         struct task_struct *curr = current;
3643         struct held_lock *hlock;
3644         unsigned int depth;
3645         int i;
3646
3647         if (unlikely(!debug_locks))
3648                 return 0;
3649
3650         depth = curr->lockdep_depth;
3651         /*
3652          * So we're all set to release this lock.. wait what lock? We don't
3653          * own any locks, you've been drinking again?
3654          */
3655         if (DEBUG_LOCKS_WARN_ON(depth <= 0))
3656                 return print_unlock_imbalance_bug(curr, lock, ip);
3657
3658         /*
3659          * Check whether the lock exists in the current stack
3660          * of held locks:
3661          */
3662         hlock = find_held_lock(curr, lock, depth, &i);
3663         if (!hlock)
3664                 return print_unlock_imbalance_bug(curr, lock, ip);
3665
3666         if (hlock->instance == lock)
3667                 lock_release_holdtime(hlock);
3668
3669         WARN(hlock->pin_count, "releasing a pinned lock\n");
3670
3671         if (hlock->references) {
3672                 hlock->references--;
3673                 if (hlock->references) {
3674                         /*
3675                          * We had, and after removing one, still have
3676                          * references, the current lock stack is still
3677                          * valid. We're done!
3678                          */
3679                         return 1;
3680                 }
3681         }
3682
3683         /*
3684          * We have the right lock to unlock, 'hlock' points to it.
3685          * Now we remove it from the stack, and add back the other
3686          * entries (if any), recalculating the hash along the way:
3687          */
3688
3689         curr->lockdep_depth = i;
3690         curr->curr_chain_key = hlock->prev_chain_key;
3691
3692         /*
3693          * The most likely case is when the unlock is on the innermost
3694          * lock. In this case, we are done!
3695          */
3696         if (i == depth-1)
3697                 return 1;
3698
3699         if (reacquire_held_locks(curr, depth, i + 1))
3700                 return 0;
3701
3702         /*
3703          * We had N bottles of beer on the wall, we drank one, but now
3704          * there's not N-1 bottles of beer left on the wall...
3705          */
3706         DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth-1);
3707
3708         /*
3709          * Since reacquire_held_locks() would have called check_chain_key()
3710          * indirectly via __lock_acquire(), we don't need to do it again
3711          * on return.
3712          */
3713         return 0;
3714 }
3715
3716 static int __lock_is_held(const struct lockdep_map *lock, int read)
3717 {
3718         struct task_struct *curr = current;
3719         int i;
3720
3721         for (i = 0; i < curr->lockdep_depth; i++) {
3722                 struct held_lock *hlock = curr->held_locks + i;
3723
3724                 if (match_held_lock(hlock, lock)) {
3725                         if (read == -1 || hlock->read == read)
3726                                 return 1;
3727
3728                         return 0;
3729                 }
3730         }
3731
3732         return 0;
3733 }
3734
3735 static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
3736 {
3737         struct pin_cookie cookie = NIL_COOKIE;
3738         struct task_struct *curr = current;
3739         int i;
3740
3741         if (unlikely(!debug_locks))
3742                 return cookie;
3743
3744         for (i = 0; i < curr->lockdep_depth; i++) {
3745                 struct held_lock *hlock = curr->held_locks + i;
3746
3747                 if (match_held_lock(hlock, lock)) {
3748                         /*
3749                          * Grab 16bits of randomness; this is sufficient to not
3750                          * be guessable and still allows some pin nesting in
3751                          * our u32 pin_count.
3752                          */
3753                         cookie.val = 1 + (prandom_u32() >> 16);
3754                         hlock->pin_count += cookie.val;
3755                         return cookie;
3756                 }
3757         }
3758
3759         WARN(1, "pinning an unheld lock\n");
3760         return cookie;
3761 }
3762
3763 static void __lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3764 {
3765         struct task_struct *curr = current;
3766         int i;
3767
3768         if (unlikely(!debug_locks))
3769                 return;
3770
3771         for (i = 0; i < curr->lockdep_depth; i++) {
3772                 struct held_lock *hlock = curr->held_locks + i;
3773
3774                 if (match_held_lock(hlock, lock)) {
3775                         hlock->pin_count += cookie.val;
3776                         return;
3777                 }
3778         }
3779
3780         WARN(1, "pinning an unheld lock\n");
3781 }
3782
3783 static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3784 {
3785         struct task_struct *curr = current;
3786         int i;
3787
3788         if (unlikely(!debug_locks))
3789                 return;
3790
3791         for (i = 0; i < curr->lockdep_depth; i++) {
3792                 struct held_lock *hlock = curr->held_locks + i;
3793
3794                 if (match_held_lock(hlock, lock)) {
3795                         if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
3796                                 return;
3797
3798                         hlock->pin_count -= cookie.val;
3799
3800                         if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
3801                                 hlock->pin_count = 0;
3802
3803                         return;
3804                 }
3805         }
3806
3807         WARN(1, "unpinning an unheld lock\n");
3808 }
3809
3810 /*
3811  * Check whether we follow the irq-flags state precisely:
3812  */
3813 static void check_flags(unsigned long flags)
3814 {
3815 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3816     defined(CONFIG_TRACE_IRQFLAGS)
3817         if (!debug_locks)
3818                 return;
3819
3820         if (irqs_disabled_flags(flags)) {
3821                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3822                         printk("possible reason: unannotated irqs-off.\n");
3823                 }
3824         } else {
3825                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3826                         printk("possible reason: unannotated irqs-on.\n");
3827                 }
3828         }
3829
3830         /*
3831          * We dont accurately track softirq state in e.g.
3832          * hardirq contexts (such as on 4KSTACKS), so only
3833          * check if not in hardirq contexts:
3834          */
3835         if (!hardirq_count()) {
3836                 if (softirq_count()) {
3837                         /* like the above, but with softirqs */
3838                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3839                 } else {
3840                         /* like the above, but inverted: softirqs should be on */
3841                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3842                 }
3843         }
3844
3845         if (!debug_locks)
3846                 print_irqtrace_events(current);
3847 #endif
3848 }
3849
3850 void lock_set_class(struct lockdep_map *lock, const char *name,
3851                     struct lock_class_key *key, unsigned int subclass,
3852                     unsigned long ip)
3853 {
3854         unsigned long flags;
3855
3856         if (unlikely(current->lockdep_recursion))
3857                 return;
3858
3859         raw_local_irq_save(flags);
3860         current->lockdep_recursion = 1;
3861         check_flags(flags);
3862         if (__lock_set_class(lock, name, key, subclass, ip))
3863                 check_chain_key(current);
3864         current->lockdep_recursion = 0;
3865         raw_local_irq_restore(flags);
3866 }
3867 EXPORT_SYMBOL_GPL(lock_set_class);
3868
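     /*
      * Sketch: lock_set_class() (and the lock_set_subclass() wrapper in
      * <linux/lockdep.h>) re-annotates a lock that is already held without
      * dropping it, roughly (for a hypothetical 'lock'):
      *
      *     lock_set_subclass(&lock->dep_map, 0, _RET_IP_);
      *
      * Internally the lock is popped off the held-lock stack and re-added
      * with the new class via reacquire_held_locks().
      */
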
3869 void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
3870 {
3871         unsigned long flags;
3872
3873         if (unlikely(current->lockdep_recursion))
3874                 return;
3875
3876         raw_local_irq_save(flags);
3877         current->lockdep_recursion = 1;
3878         check_flags(flags);
3879         if (__lock_downgrade(lock, ip))
3880                 check_chain_key(current);
3881         current->lockdep_recursion = 0;
3882         raw_local_irq_restore(flags);
3883 }
3884 EXPORT_SYMBOL_GPL(lock_downgrade);
3885
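     /*
      * Sketch: a write-to-read downgrade such as rwsem's downgrade_write()
      * is expected to funnel into this hook, roughly:
      *
      *     lock_downgrade(&sem->dep_map, _RET_IP_);
      *     __downgrade_write(sem);
      *
      * after which lockdep tracks the lock as held for read.
      */
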
3886 /*
3887  * We are not always called with irqs disabled - do that here,
3888  * and also avoid lockdep recursion:
3889  */
3890 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3891                           int trylock, int read, int check,
3892                           struct lockdep_map *nest_lock, unsigned long ip)
3893 {
3894         unsigned long flags;
3895
3896         if (unlikely(current->lockdep_recursion))
3897                 return;
3898
3899         raw_local_irq_save(flags);
3900         check_flags(flags);
3901
3902         current->lockdep_recursion = 1;
3903         trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3904         __lock_acquire(lock, subclass, trylock, read, check,
3905                        irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
3906         current->lockdep_recursion = 0;
3907         raw_local_irq_restore(flags);
3908 }
3909 EXPORT_SYMBOL_GPL(lock_acquire);
3910
3911 void lock_release(struct lockdep_map *lock, int nested,
3912                           unsigned long ip)
3913 {
3914         unsigned long flags;
3915
3916         if (unlikely(current->lockdep_recursion))
3917                 return;
3918
3919         raw_local_irq_save(flags);
3920         check_flags(flags);
3921         current->lockdep_recursion = 1;
3922         trace_lock_release(lock, ip);
3923         if (__lock_release(lock, nested, ip))
3924                 check_chain_key(current);
3925         current->lockdep_recursion = 0;
3926         raw_local_irq_restore(flags);
3927 }
3928 EXPORT_SYMBOL_GPL(lock_release);
3929
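     /*
      * Sketch: lock_acquire()/lock_release() also annotate objects that are
      * not locks at all; the lock_map_acquire()/lock_map_release() helpers
      * in <linux/lockdep.h> boil down to
      *
      *     lock_acquire(map, 0, 0, 0, 1, NULL, _THIS_IP_);
      *     ...
      *     lock_release(map, 1, _THIS_IP_);
      *
      * which is how e.g. workqueue flush dependencies are fed into the
      * validator.
      */
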
3930 int lock_is_held_type(const struct lockdep_map *lock, int read)
3931 {
3932         unsigned long flags;
3933         int ret = 0;
3934
3935         if (unlikely(current->lockdep_recursion))
3936                 return 1; /* avoid false negative lockdep_assert_held() */
3937
3938         raw_local_irq_save(flags);
3939         check_flags(flags);
3940
3941         current->lockdep_recursion = 1;
3942         ret = __lock_is_held(lock, read);
3943         current->lockdep_recursion = 0;
3944         raw_local_irq_restore(flags);
3945
3946         return ret;
3947 }
3948 EXPORT_SYMBOL_GPL(lock_is_held_type);
3949
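     /*
      * Sketch: this is what backs the lockdep_assert_held*() family, e.g.
      *
      *     lockdep_assert_held(&foo->lock);
      *
      * which roughly does WARN_ON(debug_locks && !lockdep_is_held(...)),
      * with lockdep_is_held() resolving to lock_is_held_type() on the
      * lock's dep_map with read == -1. The early "return 1" above keeps
      * such assertions from firing falsely while lockdep itself is
      * recursing.
      */
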
3950 struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
3951 {
3952         struct pin_cookie cookie = NIL_COOKIE;
3953         unsigned long flags;
3954
3955         if (unlikely(current->lockdep_recursion))
3956                 return cookie;
3957
3958         raw_local_irq_save(flags);
3959         check_flags(flags);
3960
3961         current->lockdep_recursion = 1;
3962         cookie = __lock_pin_lock(lock);
3963         current->lockdep_recursion = 0;
3964         raw_local_irq_restore(flags);
3965
3966         return cookie;
3967 }
3968 EXPORT_SYMBOL_GPL(lock_pin_lock);
3969
3970 void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3971 {
3972         unsigned long flags;
3973
3974         if (unlikely(current->lockdep_recursion))
3975                 return;
3976
3977         raw_local_irq_save(flags);
3978         check_flags(flags);
3979
3980         current->lockdep_recursion = 1;
3981         __lock_repin_lock(lock, cookie);
3982         current->lockdep_recursion = 0;
3983         raw_local_irq_restore(flags);
3984 }
3985 EXPORT_SYMBOL_GPL(lock_repin_lock);
3986
3987 void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
3988 {
3989         unsigned long flags;
3990
3991         if (unlikely(current->lockdep_recursion))
3992                 return;
3993
3994         raw_local_irq_save(flags);
3995         check_flags(flags);
3996
3997         current->lockdep_recursion = 1;
3998         __lock_unpin_lock(lock, cookie);
3999         current->lockdep_recursion = 0;
4000         raw_local_irq_restore(flags);
4001 }
4002 EXPORT_SYMBOL_GPL(lock_unpin_lock);
4003
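     /*
      * Sketch of the pin/unpin protocol (hypothetical caller): code that
      * must not have a lock dropped behind its back pins it and later
      * unpins it with the same cookie:
      *
      *     struct pin_cookie cookie = lockdep_pin_lock(&rq->lock);
      *     ...
      *     lockdep_unpin_lock(&rq->lock, cookie);
      *
      * Releasing the lock while it is still pinned trips the "releasing a
      * pinned lock" warning in __lock_release().
      */
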
4004 #ifdef CONFIG_LOCK_STAT
4005 static int
4006 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
4007                            unsigned long ip)
4008 {
4009         if (!debug_locks_off())
4010                 return 0;
4011         if (debug_locks_silent)
4012                 return 0;
4013
4014         pr_warn("\n");
4015         pr_warn("=================================\n");
4016         pr_warn("WARNING: bad contention detected!\n");
4017         print_kernel_ident();
4018         pr_warn("---------------------------------\n");
4019         pr_warn("%s/%d is trying to contend lock (",
4020                 curr->comm, task_pid_nr(curr));
4021         print_lockdep_cache(lock);
4022         pr_cont(") at:\n");
4023         print_ip_sym(ip);
4024         pr_warn("but there are no locks held!\n");
4025         pr_warn("\nother info that might help us debug this:\n");
4026         lockdep_print_held_locks(curr);
4027
4028         pr_warn("\nstack backtrace:\n");
4029         dump_stack();
4030
4031         return 0;
4032 }
4033
4034 static void
4035 __lock_contended(struct lockdep_map *lock, unsigned long ip)
4036 {
4037         struct task_struct *curr = current;
4038         struct held_lock *hlock;
4039         struct lock_class_stats *stats;
4040         unsigned int depth;
4041         int i, contention_point, contending_point;
4042
4043         depth = curr->lockdep_depth;
4044         /*
4045          * Whee, we contended on this lock, except it seems we're not
4046          * actually trying to acquire anything much at all..
4047          */
4048         if (DEBUG_LOCKS_WARN_ON(!depth))
4049                 return;
4050
4051         hlock = find_held_lock(curr, lock, depth, &i);
4052         if (!hlock) {
4053                 print_lock_contention_bug(curr, lock, ip);
4054                 return;
4055         }
4056
4057         if (hlock->instance != lock)
4058                 return;
4059
4060         hlock->waittime_stamp = lockstat_clock();
4061
4062         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4063         contending_point = lock_point(hlock_class(hlock)->contending_point,
4064                                       lock->ip);
4065
4066         stats = get_lock_stats(hlock_class(hlock));
4067         if (contention_point < LOCKSTAT_POINTS)
4068                 stats->contention_point[contention_point]++;
4069         if (contending_point < LOCKSTAT_POINTS)
4070                 stats->contending_point[contending_point]++;
4071         if (lock->cpu != smp_processor_id())
4072                 stats->bounces[bounce_contended + !!hlock->read]++;
4073 }
4074
4075 static void
4076 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
4077 {
4078         struct task_struct *curr = current;
4079         struct held_lock *hlock;
4080         struct lock_class_stats *stats;
4081         unsigned int depth;
4082         u64 now, waittime = 0;
4083         int i, cpu;
4084
4085         depth = curr->lockdep_depth;
4086         /*
4087          * Yay, we acquired ownership of this lock we didn't try to
4088          * acquire, how the heck did that happen?
4089          */
4090         if (DEBUG_LOCKS_WARN_ON(!depth))
4091                 return;
4092
4093         hlock = find_held_lock(curr, lock, depth, &i);
4094         if (!hlock) {
4095                 print_lock_contention_bug(curr, lock, _RET_IP_);
4096                 return;
4097         }
4098
4099         if (hlock->instance != lock)
4100                 return;
4101
4102         cpu = smp_processor_id();
4103         if (hlock->waittime_stamp) {
4104                 now = lockstat_clock();
4105                 waittime = now - hlock->waittime_stamp;
4106                 hlock->holdtime_stamp = now;
4107         }
4108
4109         trace_lock_acquired(lock, ip);
4110
4111         stats = get_lock_stats(hlock_class(hlock));
4112         if (waittime) {
4113                 if (hlock->read)
4114                         lock_time_inc(&stats->read_waittime, waittime);
4115                 else
4116                         lock_time_inc(&stats->write_waittime, waittime);
4117         }
4118         if (lock->cpu != cpu)
4119                 stats->bounces[bounce_acquired + !!hlock->read]++;
4120
4121         lock->cpu = cpu;
4122         lock->ip = ip;
4123 }
4124
4125 void lock_contended(struct lockdep_map *lock, unsigned long ip)
4126 {
4127         unsigned long flags;
4128
4129         if (unlikely(!lock_stat || !debug_locks))
4130                 return;
4131
4132         if (unlikely(current->lockdep_recursion))
4133                 return;
4134
4135         raw_local_irq_save(flags);
4136         check_flags(flags);
4137         current->lockdep_recursion = 1;
4138         trace_lock_contended(lock, ip);
4139         __lock_contended(lock, ip);
4140         current->lockdep_recursion = 0;
4141         raw_local_irq_restore(flags);
4142 }
4143 EXPORT_SYMBOL_GPL(lock_contended);
4144
4145 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
4146 {
4147         unsigned long flags;
4148
4149         if (unlikely(!lock_stat || !debug_locks))
4150                 return;
4151
4152         if (unlikely(current->lockdep_recursion))
4153                 return;
4154
4155         raw_local_irq_save(flags);
4156         check_flags(flags);
4157         current->lockdep_recursion = 1;
4158         __lock_acquired(lock, ip);
4159         current->lockdep_recursion = 0;
4160         raw_local_irq_restore(flags);
4161 }
4162 EXPORT_SYMBOL_GPL(lock_acquired);
4163 #endif
4164
4165 /*
4166  * Used by the testsuite, sanitize the validator state
4167  * after a simulated failure:
4168  */
4169
4170 void lockdep_reset(void)
4171 {
4172         unsigned long flags;
4173         int i;
4174
4175         raw_local_irq_save(flags);
4176         current->curr_chain_key = 0;
4177         current->lockdep_depth = 0;
4178         current->lockdep_recursion = 0;
4179         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
4180         nr_hardirq_chains = 0;
4181         nr_softirq_chains = 0;
4182         nr_process_chains = 0;
4183         debug_locks = 1;
4184         for (i = 0; i < CHAINHASH_SIZE; i++)
4185                 INIT_HLIST_HEAD(chainhash_table + i);
4186         raw_local_irq_restore(flags);
4187 }
4188
4189 /* Remove a class from a lock chain. Must be called with the graph lock held. */
4190 static void remove_class_from_lock_chain(struct lock_chain *chain,
4191                                          struct lock_class *class)
4192 {
4193 #ifdef CONFIG_PROVE_LOCKING
4194         struct lock_chain *new_chain;
4195         u64 chain_key;
4196         int i;
4197
4198         for (i = chain->base; i < chain->base + chain->depth; i++) {
4199                 if (chain_hlocks[i] != class - lock_classes)
4200                         continue;
4201                 /* The code below leaks one chain_hlocks[] entry. */
4202                 if (--chain->depth > 0)
4203                         memmove(&chain_hlocks[i], &chain_hlocks[i + 1],
4204                                 (chain->base + chain->depth - i) *
4205                                 sizeof(chain_hlocks[0]));
4206                 /*
4207                  * Each lock class occurs at most once in a lock chain so once
4208                  * we found a match we can break out of this loop.
4209                  */
4210                 goto recalc;
4211         }
4212         /* Since the chain has not been modified, return. */
4213         return;
4214
4215 recalc:
4216         chain_key = 0;
4217         for (i = chain->base; i < chain->base + chain->depth; i++)
4218                 chain_key = iterate_chain_key(chain_key, chain_hlocks[i] + 1);
4219         if (chain->depth && chain->chain_key == chain_key)
4220                 return;
4221         /* Overwrite the chain key for concurrent RCU readers. */
4222         WRITE_ONCE(chain->chain_key, chain_key);
4223         /*
4224          * Note: calling hlist_del_rcu() from inside a
4225          * hlist_for_each_entry_rcu() loop is safe.
4226          */
4227         hlist_del_rcu(&chain->entry);
4228         if (chain->depth == 0)
4229                 return;
4230         /*
4231          * If the modified lock chain matches an existing lock chain, drop
4232          * the modified lock chain.
4233          */
4234         if (lookup_chain_cache(chain_key))
4235                 return;
4236         if (WARN_ON_ONCE(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
4237                 debug_locks_off();
4238                 return;
4239         }
4240         /*
4241          * Leak *chain because it is not safe to reinsert it before an RCU
4242          * grace period has expired.
4243          */
4244         new_chain = lock_chains + nr_lock_chains++;
4245         *new_chain = *chain;
4246         hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
4247 #endif
4248 }
4249
4250 /* Must be called with the graph lock held. */
4251 static void remove_class_from_lock_chains(struct lock_class *class)
4252 {
4253         struct lock_chain *chain;
4254         struct hlist_head *head;
4255         int i;
4256
4257         for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
4258                 head = chainhash_table + i;
4259                 hlist_for_each_entry_rcu(chain, head, entry) {
4260                         remove_class_from_lock_chain(chain, class);
4261                 }
4262         }
4263 }
4264
4265 /*
4266  * Remove all references to a lock class. The caller must hold the graph lock.
4267  */
4268 static void zap_class(struct pending_free *pf, struct lock_class *class)
4269 {
4270         struct lock_list *entry;
4271         int i;
4272
4273         WARN_ON_ONCE(!class->key);
4274
4275         /*
4276          * Remove all dependencies this lock is
4277          * involved in:
4278          */
4279         for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) {
4280                 if (entry->class != class && entry->links_to != class)
4281                         continue;
4282                 list_del_rcu(&entry->entry);
4283                 /* Clear .class and .links_to to avoid double removal. */
4284                 WRITE_ONCE(entry->class, NULL);
4285                 WRITE_ONCE(entry->links_to, NULL);
4286         }
4287         if (list_empty(&class->locks_after) &&
4288             list_empty(&class->locks_before)) {
4289                 list_move_tail(&class->lock_entry, &pf->zapped);
4290                 hlist_del_rcu(&class->hash_entry);
4291                 WRITE_ONCE(class->key, NULL);
4292                 WRITE_ONCE(class->name, NULL);
4293                 nr_lock_classes--;
4294         } else {
4295                 WARN_ONCE(true, "%s() failed for class %s\n", __func__,
4296                           class->name);
4297         }
4298
4299         remove_class_from_lock_chains(class);
4300 }
4301
4302 static void reinit_class(struct lock_class *class)
4303 {
4304         void *const p = class;
4305         const unsigned int offset = offsetof(struct lock_class, key);
4306
4307         WARN_ON_ONCE(!class->lock_entry.next);
4308         WARN_ON_ONCE(!list_empty(&class->locks_after));
4309         WARN_ON_ONCE(!list_empty(&class->locks_before));
4310         memset(p + offset, 0, sizeof(*class) - offset);
4311         WARN_ON_ONCE(!class->lock_entry.next);
4312         WARN_ON_ONCE(!list_empty(&class->locks_after));
4313         WARN_ON_ONCE(!list_empty(&class->locks_before));
4314 }
4315
4316 static inline int within(const void *addr, void *start, unsigned long size)
4317 {
4318         return addr >= start && addr < start + size;
4319 }
4320
4321 static bool inside_selftest(void)
4322 {
4323         return current == lockdep_selftest_task_struct;
4324 }
4325
4326 /* The caller must hold the graph lock. */
4327 static struct pending_free *get_pending_free(void)
4328 {
4329         return delayed_free.pf + delayed_free.index;
4330 }
4331
4332 static void free_zapped_rcu(struct rcu_head *cb);
4333
4334 /*
4335  * Schedule an RCU callback if no RCU callback is pending. Must be called with
4336  * the graph lock held.
4337  */
4338 static void call_rcu_zapped(struct pending_free *pf)
4339 {
4340         WARN_ON_ONCE(inside_selftest());
4341
4342         if (list_empty(&pf->zapped))
4343                 return;
4344
4345         if (delayed_free.scheduled)
4346                 return;
4347
4348         delayed_free.scheduled = true;
4349
4350         WARN_ON_ONCE(delayed_free.pf + delayed_free.index != pf);
4351         delayed_free.index ^= 1;
4352
4353         call_rcu(&delayed_free.rcu_head, free_zapped_rcu);
4354 }
4355
4356 /* The caller must hold the graph lock. May be called from RCU context. */
4357 static void __free_zapped_classes(struct pending_free *pf)
4358 {
4359         struct lock_class *class;
4360
4361         list_for_each_entry(class, &pf->zapped, lock_entry)
4362                 reinit_class(class);
4363
4364         list_splice_init(&pf->zapped, &free_lock_classes);
4365 }
4366
4367 static void free_zapped_rcu(struct rcu_head *ch)
4368 {
4369         struct pending_free *pf;
4370         unsigned long flags;
4371
4372         if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
4373                 return;
4374
4375         raw_local_irq_save(flags);
4376         if (!graph_lock())
4377                 goto out_irq;
4378
4379         /* closed head: the pf entry the pending RCU callback covers */
4380         pf = delayed_free.pf + (delayed_free.index ^ 1);
4381         __free_zapped_classes(pf);
4382         delayed_free.scheduled = false;
4383
4384         /*
4385          * If there's anything on the open list, close and start a new callback.
4386          */
4387         call_rcu_zapped(delayed_free.pf + delayed_free.index);
4388
4389         graph_unlock();
4390 out_irq:
4391         raw_local_irq_restore(flags);
4392 }
4393
4394 /*
4395  * Remove all lock classes from the class hash table and from the
4396  * all_lock_classes list whose key or name is in the address range [start,
4397  * start + size). Move these lock classes to the zapped_classes list. Must
4398  * be called with the graph lock held.
4399  */
4400 static void __lockdep_free_key_range(struct pending_free *pf, void *start,
4401                                      unsigned long size)
4402 {
4403         struct lock_class *class;
4404         struct hlist_head *head;
4405         int i;
4406
4407         /* Unhash all classes that were created by a module. */
4408         for (i = 0; i < CLASSHASH_SIZE; i++) {
4409                 head = classhash_table + i;
4410                 hlist_for_each_entry_rcu(class, head, hash_entry) {
4411                         if (!within(class->key, start, size) &&
4412                             !within(class->name, start, size))
4413                                 continue;
4414                         zap_class(pf, class);
4415                 }
4416         }
4417 }
4418
4419 /*
4420  * Used in module.c to remove lock classes from memory that is going to be
4421  * freed; and possibly re-used by other modules.
4422  *
4423  * We will have had one synchronize_rcu() before getting here, so we're
4424  * guaranteed nobody will look up these exact classes -- they're properly dead
4425  * but still allocated.
4426  */
4427 static void lockdep_free_key_range_reg(void *start, unsigned long size)
4428 {
4429         struct pending_free *pf;
4430         unsigned long flags;
4431         int locked;
4432
4433         init_data_structures_once();
4434
4435         raw_local_irq_save(flags);
4436         locked = graph_lock();
4437         if (!locked)
4438                 goto out_irq;
4439
4440         pf = get_pending_free();
4441         __lockdep_free_key_range(pf, start, size);
4442         call_rcu_zapped(pf);
4443
4444         graph_unlock();
4445 out_irq:
4446         raw_local_irq_restore(flags);
4447
4448         /*
4449          * Wait for any possible iterators from look_up_lock_class() to pass
4450          * before continuing to free the memory they refer to.
4451          */
4452         synchronize_rcu();
4453 }
4454
4455 /*
4456  * Free all lockdep keys in the range [start, start+size). Does not sleep.
4457  * Ignores debug_locks. Must only be used by the lockdep selftests.
4458  */
4459 static void lockdep_free_key_range_imm(void *start, unsigned long size)
4460 {
4461         struct pending_free *pf = delayed_free.pf;
4462         unsigned long flags;
4463
4464         init_data_structures_once();
4465
4466         raw_local_irq_save(flags);
4467         arch_spin_lock(&lockdep_lock);
4468         __lockdep_free_key_range(pf, start, size);
4469         __free_zapped_classes(pf);
4470         arch_spin_unlock(&lockdep_lock);
4471         raw_local_irq_restore(flags);
4472 }
4473
4474 void lockdep_free_key_range(void *start, unsigned long size)
4475 {
4476         init_data_structures_once();
4477
4478         if (inside_selftest())
4479                 lockdep_free_key_range_imm(start, size);
4480         else
4481                 lockdep_free_key_range_reg(start, size);
4482 }
4483
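     /*
      * Usage sketch: the module unloader is expected to call this on the
      * module's memory range so that classes whose key or name lived in
      * that module are zapped before the memory is reused, along the
      * lines of:
      *
      *     lockdep_free_key_range(mod->core_layout.base,
      *                            mod->core_layout.size);
      */
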
4484 /*
4485  * Check whether any element of the @lock->class_cache[] array refers to a
4486  * registered lock class. The caller must hold either the graph lock or the
4487  * RCU read lock.
4488  */
4489 static bool lock_class_cache_is_registered(struct lockdep_map *lock)
4490 {
4491         struct lock_class *class;
4492         struct hlist_head *head;
4493         int i, j;
4494
4495         for (i = 0; i < CLASSHASH_SIZE; i++) {
4496                 head = classhash_table + i;
4497                 hlist_for_each_entry_rcu(class, head, hash_entry) {
4498                         for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
4499                                 if (lock->class_cache[j] == class)
4500                                         return true;
4501                 }
4502         }
4503         return false;
4504 }
4505
4506 /* The caller must hold the graph lock. Does not sleep. */
4507 static void __lockdep_reset_lock(struct pending_free *pf,
4508                                  struct lockdep_map *lock)
4509 {
4510         struct lock_class *class;
4511         int j;
4512
4513         /*
4514          * Remove all classes this lock might have:
4515          */
4516         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
4517                 /*
4518                  * If the class exists we look it up and zap it:
4519                  */
4520                 class = look_up_lock_class(lock, j);
4521                 if (class)
4522                         zap_class(pf, class);
4523         }
4524         /*
4525          * Debug check: in the end all mapped classes should
4526          * be gone.
4527          */
4528         if (WARN_ON_ONCE(lock_class_cache_is_registered(lock)))
4529                 debug_locks_off();
4530 }
4531
4532 /*
4533  * Remove all information lockdep has about a lock if debug_locks == 1. Free
4534  * released data structures from RCU context.
4535  */
4536 static void lockdep_reset_lock_reg(struct lockdep_map *lock)
4537 {
4538         struct pending_free *pf;
4539         unsigned long flags;
4540         int locked;
4541
4542         raw_local_irq_save(flags);
4543         locked = graph_lock();
4544         if (!locked)
4545                 goto out_irq;
4546
4547         pf = get_pending_free();
4548         __lockdep_reset_lock(pf, lock);
4549         call_rcu_zapped(pf);
4550
4551         graph_unlock();
4552 out_irq:
4553         raw_local_irq_restore(flags);
4554 }
4555
4556 /*
4557  * Reset a lock. Does not sleep. Ignores debug_locks. Must only be used by the
4558  * lockdep selftests.
4559  */
4560 static void lockdep_reset_lock_imm(struct lockdep_map *lock)
4561 {
4562         struct pending_free *pf = delayed_free.pf;
4563         unsigned long flags;
4564
4565         raw_local_irq_save(flags);
4566         arch_spin_lock(&lockdep_lock);
4567         __lockdep_reset_lock(pf, lock);
4568         __free_zapped_classes(pf);
4569         arch_spin_unlock(&lockdep_lock);
4570         raw_local_irq_restore(flags);
4571 }
4572
4573 void lockdep_reset_lock(struct lockdep_map *lock)
4574 {
4575         init_data_structures_once();
4576
4577         if (inside_selftest())
4578                 lockdep_reset_lock_imm(lock);
4579         else
4580                 lockdep_reset_lock_reg(lock);
4581 }
4582
4583 void __init lockdep_init(void)
4584 {
4585         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
4586
4587         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
4588         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
4589         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
4590         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
4591         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
4592         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
4593         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
4594
4595         printk(" memory used by lock dependency info: %zu kB\n",
4596                (sizeof(lock_classes) +
4597                 sizeof(classhash_table) +
4598                 sizeof(list_entries) +
4599                 sizeof(chainhash_table) +
4600                 sizeof(delayed_free)
4601 #ifdef CONFIG_PROVE_LOCKING
4602                 + sizeof(lock_cq)
4603                 + sizeof(lock_chains)
4604                 + sizeof(chain_hlocks)
4605 #endif
4606                 ) / 1024
4607                 );
4608
4609         printk(" per task-struct memory footprint: %zu bytes\n",
4610                sizeof(((struct task_struct *)NULL)->held_locks));
4611 }
4612
4613 static void
4614 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
4615                      const void *mem_to, struct held_lock *hlock)
4616 {
4617         if (!debug_locks_off())
4618                 return;
4619         if (debug_locks_silent)
4620                 return;
4621
4622         pr_warn("\n");
4623         pr_warn("=========================\n");
4624         pr_warn("WARNING: held lock freed!\n");
4625         print_kernel_ident();
4626         pr_warn("-------------------------\n");
4627         pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
4628                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
4629         print_lock(hlock);
4630         lockdep_print_held_locks(curr);
4631
4632         pr_warn("\nstack backtrace:\n");
4633         dump_stack();
4634 }
4635
4636 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4637                                 const void* lock_from, unsigned long lock_len)
4638 {
4639         return lock_from + lock_len <= mem_from ||
4640                 mem_from + mem_len <= lock_from;
4641 }
4642
4643 /*
4644  * Called when kernel memory is freed (or unmapped), or if a lock
4645  * is destroyed or reinitialized - this code checks whether there is
4646  * any held lock in the memory range of <from> to <to>:
4647  */
4648 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4649 {
4650         struct task_struct *curr = current;
4651         struct held_lock *hlock;
4652         unsigned long flags;
4653         int i;
4654
4655         if (unlikely(!debug_locks))
4656                 return;
4657
4658         raw_local_irq_save(flags);
4659         for (i = 0; i < curr->lockdep_depth; i++) {
4660                 hlock = curr->held_locks + i;
4661
4662                 if (not_in_range(mem_from, mem_len, hlock->instance,
4663                                         sizeof(*hlock->instance)))
4664                         continue;
4665
4666                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4667                 break;
4668         }
4669         raw_local_irq_restore(flags);
4670 }
4671 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
4672
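     /*
      * Sketch (hypothetical caller): freeing an object that embeds a lock
      * which is still held, e.g.
      *
      *     spin_lock(&obj->lock);
      *     kfree(obj);
      *
      * is expected to land in print_freed_lock_bug() above via the
      * allocators' free-path hooks that call debug_check_no_locks_freed().
      */
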
4673 static void print_held_locks_bug(void)
4674 {
4675         if (!debug_locks_off())
4676                 return;
4677         if (debug_locks_silent)
4678                 return;
4679
4680         pr_warn("\n");
4681         pr_warn("====================================\n");
4682         pr_warn("WARNING: %s/%d still has locks held!\n",
4683                current->comm, task_pid_nr(current));
4684         print_kernel_ident();
4685         pr_warn("------------------------------------\n");
4686         lockdep_print_held_locks(current);
4687         pr_warn("\nstack backtrace:\n");
4688         dump_stack();
4689 }
4690
4691 void debug_check_no_locks_held(void)
4692 {
4693         if (unlikely(current->lockdep_depth > 0))
4694                 print_held_locks_bug();
4695 }
4696 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
4697
4698 #ifdef __KERNEL__
4699 void debug_show_all_locks(void)
4700 {
4701         struct task_struct *g, *p;
4702
4703         if (unlikely(!debug_locks)) {
4704                 pr_warn("INFO: lockdep is turned off.\n");
4705                 return;
4706         }
4707         pr_warn("\nShowing all locks held in the system:\n");
4708
4709         rcu_read_lock();
4710         for_each_process_thread(g, p) {
4711                 if (!p->lockdep_depth)
4712                         continue;
4713                 lockdep_print_held_locks(p);
4714                 touch_nmi_watchdog();
4715                 touch_all_softlockup_watchdogs();
4716         }
4717         rcu_read_unlock();
4718
4719         pr_warn("\n");
4720         pr_warn("=============================================\n\n");
4721 }
4722 EXPORT_SYMBOL_GPL(debug_show_all_locks);
4723 #endif
4724
4725 /*
4726  * Careful: only use this function if you are sure that
4727  * the task cannot run in parallel!
4728  */
4729 void debug_show_held_locks(struct task_struct *task)
4730 {
4731         if (unlikely(!debug_locks)) {
4732                 printk("INFO: lockdep is turned off.\n");
4733                 return;
4734         }
4735         lockdep_print_held_locks(task);
4736 }
4737 EXPORT_SYMBOL_GPL(debug_show_held_locks);
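
/*
 * Illustrative sketch, not part of lockdep itself: per the warning above,
 * debug_show_held_locks() is only safe on a task that cannot run
 * concurrently.  A hypothetical hung-task style report, where @t has been
 * observed blocked across several checks, could use it like this:
 */
#if 0
static void example_report_blocked_task(struct task_struct *t)
{
        pr_warn("example: task %s/%d appears to be stuck\n",
                t->comm, task_pid_nr(t));
        debug_show_held_locks(t);
}
#endif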
4738
4739 asmlinkage __visible void lockdep_sys_exit(void)
4740 {
4741         struct task_struct *curr = current;
4742
4743         if (unlikely(curr->lockdep_depth)) {
4744                 if (!debug_locks_off())
4745                         return;
4746                 pr_warn("\n");
4747                 pr_warn("================================================\n");
4748                 pr_warn("WARNING: lock held when returning to user space!\n");
4749                 print_kernel_ident();
4750                 pr_warn("------------------------------------------------\n");
4751                 pr_warn("%s/%d is leaving the kernel with locks still held!\n",
4752                                 curr->comm, task_pid_nr(curr));
4753                 lockdep_print_held_locks(curr);
4754         }
4755
4756         /*
4757          * The lock history for each syscall should be independent. So wipe the
4758          * slate clean on return to userspace.
4759          */
4760         lockdep_invariant_state(false);
4761 }
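
/*
 * Illustrative sketch, not part of lockdep itself: lockdep_sys_exit() is
 * meant to be invoked by the architecture's (or the generic) syscall-exit
 * code just before the final return to user space.  A hypothetical helper
 * in that path would look roughly like this; real entry code typically
 * does it from assembly or from the common entry functions.
 */
#if 0
static __always_inline void example_exit_to_user_mode(void)
{
        /* Warn about held locks and reset per-syscall lockdep state. */
        lockdep_sys_exit();
        /* ... arch specific: restore user registers and return ... */
}
#endif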
4762
4763 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4764 {
4765         struct task_struct *curr = current;
4766
4767         /* Note: the following can be executed concurrently, so be careful. */
4768         pr_warn("\n");
4769         pr_warn("=============================\n");
4770         pr_warn("WARNING: suspicious RCU usage\n");
4771         print_kernel_ident();
4772         pr_warn("-----------------------------\n");
4773         pr_warn("%s:%d %s!\n", file, line, s);
4774         pr_warn("\nother info that might help us debug this:\n\n");
4775         pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
4776                !rcu_lockdep_current_cpu_online()
4777                         ? "RCU used illegally from offline CPU!\n"
4778                         : !rcu_is_watching()
4779                                 ? "RCU used illegally from idle CPU!\n"
4780                                 : "",
4781                rcu_scheduler_active, debug_locks);
4782
4783         /*
4784          * If a CPU is in the RCU-free window in idle (i.e. in the section
4785          * between rcu_idle_enter() and rcu_idle_exit()), then RCU
4786          * considers that CPU to be in an "extended quiescent state",
4787          * which means that RCU will be completely ignoring that CPU.
4788          * Therefore, rcu_read_lock() and friends have absolutely no
4789          * effect on a CPU running in that state. In other words, even if
4790          * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
4791          * delete data structures out from under it.  RCU really has no
4792          * choice here: we need to keep an RCU-free window in idle where
4793          * the CPU may possibly enter into low power mode. This way, CPUs
4794          * that start a grace period can notice the extended quiescent state
4795          * instead of waiting on the idle CPU. Otherwise we would delay any
4796          * grace period for as long as we run in the idle task.
4797          *
4798          * So complain bitterly if someone does call rcu_read_lock(),
4799          * rcu_read_lock_bh() and so on from extended quiescent states.
4800          */
4801         if (!rcu_is_watching())
4802                 pr_warn("RCU used illegally from extended quiescent state!\n");
4803
4804         lockdep_print_held_locks(curr);
4805         pr_warn("\nstack backtrace:\n");
4806         dump_stack();
4807 }
4808 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
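
/*
 * Illustrative sketch, not part of lockdep itself: this is the kind of bug
 * lockdep_rcu_suspicious() ends up reporting.  rcu_dereference() expands
 * to lockdep checks (via RCU_LOCKDEP_WARN()) that call into this function
 * when neither rcu_read_lock() nor an update-side lock protects the
 * access.  The structure and functions below are hypothetical.
 */
#if 0
struct example_cfg {
        int value;
};

static struct example_cfg __rcu *example_cfg_ptr;

static int example_read_value_buggy(void)
{
        /* BUG (intentional, for illustration): no rcu_read_lock() held. */
        return rcu_dereference(example_cfg_ptr)->value;
}

static int example_read_value(void)
{
        struct example_cfg *cfg;
        int val = -1;

        rcu_read_lock();
        cfg = rcu_dereference(example_cfg_ptr);
        if (cfg)
                val = cfg->value;
        rcu_read_unlock();

        return val;
}
#endif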