/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);


/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        while (!_raw_spin_trylock(&kernel_flag)) {
                if (test_thread_flag(TIF_NEED_RESCHED))
                        return -EAGAIN;
                cpu_relax();
        }
        preempt_disable();
        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        _raw_spin_unlock(&kernel_flag);
        preempt_enable_no_resched();
}
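
/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above are not called directly.  The scheduler goes through the
 * lock_depth-checking wrappers in <linux/smp_lock.h>, so a task that
 * does not hold the BKL pays nothing extra over a context switch.
 * Roughly (label name hypothetical):
 *
 *      release_kernel_lock(prev);      // __release_kernel_lock() only
 *                                      // if prev->lock_depth >= 0
 *      ...pick the next task and context-switch...
 *      if (unlikely(reacquire_kernel_lock(current) < 0))
 *              goto pick_again;        // lock busy and a resched is pending
 */
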
/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (i.e. UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
        preempt_disable();
        if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
                /*
                 * If preemption was disabled even before this
                 * was called, there's nothing we can be polite
                 * about - just spin.
                 */
                if (preempt_count() > 1) {
                        _raw_spin_lock(&kernel_flag);
                        return;
                }

                /*
                 * Otherwise, let's wait for the kernel lock
                 * with preemption enabled.
                 */
                do {
                        preempt_enable();
                        while (spin_is_locked(&kernel_flag))
                                cpu_relax();
                        preempt_disable();
                } while (!_raw_spin_trylock(&kernel_flag));
        }
}

#else

/*
 * Non-preemption case - just get the spinlock.
 */
static inline void __lock_kernel(void)
{
        _raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
        /*
         * The BKL is not covered by lockdep, so we open-code the
         * unlocking sequence (and thus avoid the dep-chain ops):
         */
        _raw_spin_unlock(&kernel_flag);
        preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
        int depth = current->lock_depth + 1;

        if (likely(!depth))
                __lock_kernel();
        current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        BUG_ON(current->lock_depth < 0);
        if (likely(--current->lock_depth < 0))
                __unlock_kernel();
}

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
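
/*
 * Illustrative only, not part of the original file: a hypothetical
 * legacy caller.  Because lock_kernel() nests via current->lock_depth,
 * helpers that also take the BKL may be called from inside the region;
 * the underlying spinlock is released only when the depth drops below
 * zero again, and it is transparently dropped around schedule().
 */
#if 0   /* example only, never built */
static void example_legacy_path(void)
{
        lock_kernel();          /* depth 0: actually takes kernel_flag */
        lock_kernel();          /* depth 1: only bumps the count */
        /* ... touch state still protected by the BKL ... */
        unlock_kernel();        /* depth back to 0: lock still held */
        unlock_kernel();        /* depth -1: kernel_flag really released */
}
#endif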