/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	lockdep_set_class(&q->lock, key);
	INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);


/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
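
/*
 * Illustrative caller-side sketch (not part of this file): the canonical
 * open-coded wait loop built from the helpers above. "condition" stands
 * for whatever state the caller is actually waiting on.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&q, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&q, &wait);
 */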

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 * - we use the "careful" check that verifies both
	 *   the next and prev pointers, so that there cannot
	 *   be any half-pending updates in progress on other
	 *   CPU's that we haven't seen yet (and that might
	 *   still change the stack area).
	 * and
	 * - all other users take the lock (ie we can only
	 *   have _one_ other CPU that looks at or modifies
	 *   the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
	else if (waitqueue_active(q))
		__wake_up_locked_key(q, mode, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);
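
/*
 * Illustrative timeline of the starvation case the else-branch above
 * handles (a sketch, not taken from this file):
 *
 *	exclusive waiter A			waker
 *	------------------			-----
 *	prepare_to_wait_exclusive()
 *	action fails (e.g. a signal)		wakes one waiter: dequeues A
 *	abort_exclusive_wait()
 *	  A is already dequeued, so the
 *	  single wakeup would be consumed
 *	  and lost; pass it on instead
 *	  via __wake_up_locked_key()
 *
 * Without this hand-off the next exclusive waiter would sleep forever.
 */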

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
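
/*
 * For orientation (paraphrased from <linux/wait.h>, not defined here):
 * DEFINE_WAIT() declares a wait_queue_t with .private = current and
 * .func = autoremove_wake_function, so waiters declared that way are
 * dequeued automatically by a successful wakeup. Roughly:
 *
 *	wait_queue_t wait = {
 *		.private	= current,
 *		.func		= autoremove_wake_function,
 *		.task_list	= LIST_HEAD_INIT(wait.task_list),
 *	};
 */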

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);
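
/*
 * For orientation (paraphrased from <linux/wait.h>): DEFINE_WAIT_BIT()
 * pairs this wake function with a key identifying (word, bit), so a
 * wakeup for one bit does not disturb waiters that hashed to the same
 * queue head but wait on a different word or bit. Roughly:
 *
 *	struct wait_bit_queue wait = {
 *		.key	= __WAIT_BIT_KEY_INITIALIZER(word, bit),
 *		.wait	= {
 *			.private	= current,
 *			.func		= wake_bit_function,
 *			.task_list	= LIST_HEAD_INIT(wait.wait.task_list),
 *		},
 *	};
 */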

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions passed to __wait_on_bit() and __wait_on_bit_lock()
 * are permitted to return nonzero codes; a nonzero return halts waiting
 * and is propagated to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
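
/*
 * Illustrative caller-side sketch (hypothetical names): sleep, without
 * taking signals, until another thread clears bit 0 of obj->flags.
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	out_of_line_wait_on_bit(&obj->flags, 0, my_bit_wait,
 *				TASK_UNINTERRUPTIBLE);
 */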

int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	do {
		int ret;

		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (!test_bit(q->key.bit_nr, q->key.flags))
			continue;
		ret = action(q->key.flags);
		if (!ret)
			continue;
		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
		return ret;
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
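
/*
 * Illustrative sketch (hypothetical names): using a bit as a sleeping
 * lock. A zero return means the bit was observed clear and has been set
 * by this caller, who releases it later with clear_bit() followed by
 * smp_mb__after_clear_bit() and wake_up_bit().
 *
 *	out_of_line_wait_on_bit_lock(&obj->flags, MY_LOCK_BIT,
 *				     my_bit_wait, TASK_UNINTERRUPTIBLE);
 *	... MY_LOCK_BIT is now set and owned by this thread ...
 */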

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
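
/*
 * Illustrative waker-side sketch (hypothetical names) matching the
 * barrier requirement documented above: the barrier orders the bit
 * clearing against the waitqueue_active() check inside wake_up_bit().
 *
 *	clear_bit(MY_BIT, &obj->flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&obj->flags, MY_BIT);
 */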

wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	/*
	 * Fold (word, bit) into a single hash input: shift the word
	 * address left by log2(BITS_PER_LONG) to make room for the bit
	 * number in the low bits.
	 */
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	/* Hash into the per-zone table of waitqueue heads. */
	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);