[linux.git] / include / linux / wait.h
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef _LINUX_WAIT_H
3#define _LINUX_WAIT_H
fb869b6e
IM
4/*
5 * Linux wait queue related types and methods
6 */
1da177e4
LT
7#include <linux/list.h>
8#include <linux/stddef.h>
9#include <linux/spinlock.h>
5b825c3a 10
1da177e4 11#include <asm/current.h>
607ca46e 12#include <uapi/linux/wait.h>
1da177e4 13
ac6424b9 14typedef struct wait_queue_entry wait_queue_entry_t;
50816c48
IM
15
16typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
17int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
1da177e4 18
ac6424b9 19/* wait_queue_entry::flags */
61ada528
PZ
20#define WQ_FLAG_EXCLUSIVE 0x01
21#define WQ_FLAG_WOKEN 0x02
2554db91 22#define WQ_FLAG_BOOKMARK 0x04
7f26482a 23#define WQ_FLAG_CUSTOM 0x08
5ef64cc8 24#define WQ_FLAG_DONE 0x10
c4d51a52 25#define WQ_FLAG_PRIORITY 0x20
61ada528 26
ac6424b9
IM
27/*
28 * A single wait-queue entry structure:
29 */
30struct wait_queue_entry {
fb869b6e 31 unsigned int flags;
fb869b6e
IM
32 void *private;
33 wait_queue_func_t func;
2055da97 34 struct list_head entry;
1da177e4
LT
35};
36
9d9d676f 37struct wait_queue_head {
fb869b6e 38 spinlock_t lock;
2055da97 39 struct list_head head;
1da177e4 40};
9d9d676f 41typedef struct wait_queue_head wait_queue_head_t;
1da177e4 42
8c65b4a6 43struct task_struct;
1da177e4
LT
44
45/*
 46 * Macros for declaration and initialisation of the datatypes
47 */
48
4b1c480b
IM
49#define __WAITQUEUE_INITIALIZER(name, tsk) { \
50 .private = tsk, \
51 .func = default_wake_function, \
2055da97 52 .entry = { NULL, NULL } }
1da177e4 53
4b1c480b 54#define DECLARE_WAITQUEUE(name, tsk) \
50816c48 55 struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
1da177e4 56
4b1c480b
IM
57#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
58 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
77eccd0d 59 .head = LIST_HEAD_INIT(name.head) }
1da177e4
LT
60
61#define DECLARE_WAIT_QUEUE_HEAD(name) \
9d9d676f 62 struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
1da177e4 63
9d9d676f 64extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
2fc39111 65
4b1c480b
IM
66#define init_waitqueue_head(wq_head) \
67 do { \
68 static struct lock_class_key __key; \
69 \
70 __init_waitqueue_head((wq_head), #wq_head, &__key); \
2fc39111 71 } while (0)
1da177e4 72
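/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a wait queue head is either defined statically with
 * DECLARE_WAIT_QUEUE_HEAD() or embedded in a structure and initialised at
 * runtime with init_waitqueue_head(). The names my_dev and my_dev_setup()
 * are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);
 *
 *	struct my_dev {
 *		struct wait_queue_head wq;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *	}
 */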
7259f0d0
PZ
73#ifdef CONFIG_LOCKDEP
74# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
75 ({ init_waitqueue_head(&name); name; })
76# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
9d9d676f 77 struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
7259f0d0
PZ
78#else
79# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
80#endif
81
50816c48 82static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
1da177e4 83{
50816c48
IM
84 wq_entry->flags = 0;
85 wq_entry->private = p;
86 wq_entry->func = default_wake_function;
1da177e4
LT
87}
88
fb869b6e 89static inline void
50816c48 90init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
1da177e4 91{
50816c48
IM
92 wq_entry->flags = 0;
93 wq_entry->private = NULL;
94 wq_entry->func = func;
1da177e4
LT
95}
96
69e51e92
PZ
97/**
98 * waitqueue_active -- locklessly test for waiters on the queue
9d9d676f 99 * @wq_head: the waitqueue to test for waiters
69e51e92
PZ
100 *
101 * returns true if the wait list is not empty
102 *
103 * NOTE: this function is lockless and requires care, incorrect usage _will_
104 * lead to sporadic and non-obvious failure.
105 *
9d9d676f 106 * Use either while holding wait_queue_head::lock or when used for wakeups
8c1007fd 107 * with an extra smp_mb() like::
69e51e92
PZ
108 *
109 * CPU0 - waker CPU1 - waiter
110 *
111 * for (;;) {
4b1c480b 112 * @cond = true; prepare_to_wait(&wq_head, &wait, state);
69e51e92 113 * smp_mb(); // smp_mb() from set_current_state()
4b1c480b
IM
114 * if (waitqueue_active(wq_head)) if (@cond)
115 * wake_up(wq_head); break;
69e51e92
PZ
116 * schedule();
117 * }
4b1c480b 118 * finish_wait(&wq_head, &wait);
69e51e92
PZ
119 *
120 * Because without the explicit smp_mb() it's possible for the
121 * waitqueue_active() load to get hoisted over the @cond store such that we'll
122 * observe an empty wait list while the waiter might not observe @cond.
123 *
124 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
125 * which (when the lock is uncontended) are of roughly equal cost.
126 */
9d9d676f 127static inline int waitqueue_active(struct wait_queue_head *wq_head)
1da177e4 128{
2055da97 129 return !list_empty(&wq_head->head);
1da177e4
LT
130}
131
a6d81d30
JB
132/**
133 * wq_has_single_sleeper - check if there is only one sleeper
134 * @wq_head: wait queue head
135 *
 136 * Returns true if @wq_head has only one sleeper on the list.
137 *
138 * Please refer to the comment for waitqueue_active.
139 */
140static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
141{
142 return list_is_singular(&wq_head->head);
143}
144
1ce0bf50
HX
145/**
146 * wq_has_sleeper - check if there are any waiting processes
4b1c480b 147 * @wq_head: wait queue head
1ce0bf50 148 *
4b1c480b 149 * Returns true if wq_head has waiting processes
1ce0bf50
HX
150 *
151 * Please refer to the comment for waitqueue_active.
152 */
9d9d676f 153static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
1ce0bf50
HX
154{
155 /*
156 * We need to be sure we are in sync with the
157 * add_wait_queue modifications to the wait queue.
158 *
159 * This memory barrier should be paired with one on the
160 * waiting side.
161 */
162 smp_mb();
9d9d676f 163 return waitqueue_active(wq_head);
1ce0bf50
HX
164}
165
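/*
 * Illustrative sketch (editor's addition): a waker that uses wq_has_sleeper()
 * to skip the wake-up path when nobody is waiting. Unlike a bare
 * waitqueue_active() check, wq_has_sleeper() supplies the smp_mb() described
 * above, so the condition store cannot be reordered past the emptiness test.
 * The names my_wq and my_cond are hypothetical.
 *
 *	my_cond = true;
 *	if (wq_has_sleeper(&my_wq))
 *		wake_up(&my_wq);
 */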
9d9d676f
IM
166extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
167extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
c4d51a52 168extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
9d9d676f 169extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
1da177e4 170
9d9d676f 171static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
1da177e4 172{
c4d51a52
DW
173 struct list_head *head = &wq_head->head;
174 struct wait_queue_entry *wq;
175
176 list_for_each_entry(wq, &wq_head->head, entry) {
177 if (!(wq->flags & WQ_FLAG_PRIORITY))
178 break;
179 head = &wq->entry;
180 }
181 list_add(&wq_entry->entry, head);
1da177e4
LT
182}
183
184/*
185 * Used for wake-one threads:
186 */
fb869b6e 187static inline void
9d9d676f 188__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
a93d2f17 189{
50816c48 190 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
9d9d676f 191 __add_wait_queue(wq_head, wq_entry);
a93d2f17
CG
192}
193
9d9d676f 194static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
1da177e4 195{
2055da97 196 list_add_tail(&wq_entry->entry, &wq_head->head);
1da177e4
LT
197}
198
fb869b6e 199static inline void
9d9d676f 200__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
a93d2f17 201{
50816c48 202 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
9d9d676f 203 __add_wait_queue_entry_tail(wq_head, wq_entry);
a93d2f17
CG
204}
205
fb869b6e 206static inline void
9d9d676f 207__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
1da177e4 208{
2055da97 209 list_del(&wq_entry->entry);
1da177e4
LT
210}
211
9d9d676f
IM
212void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
213void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
11a19c7b
TC
214void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
215 unsigned int mode, void *key, wait_queue_entry_t *bookmark);
ce4dd442 216void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
f94df989 217void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
9d9d676f 218void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
ce4dd442 219void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
42288cb4 220void __wake_up_pollfree(struct wait_queue_head *wq_head);
1da177e4 221
e64d66c8
MW
222#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
223#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
224#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
63b20011
TG
225#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
226#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
e64d66c8 227
1da177e4
LT
228#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
229#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
230#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
ce4dd442 231#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE)
1da177e4 232
0ccf831c 233/*
c0da3775 234 * Wakeup macros to be used to report events to the targets.
0ccf831c 235 */
3ad6f93e
AV
236#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
237#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
4b1c480b 238#define wake_up_poll(x, m) \
3ad6f93e 239 __wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
4b1c480b 240#define wake_up_locked_poll(x, m) \
3ad6f93e 241 __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
4b1c480b 242#define wake_up_interruptible_poll(x, m) \
3ad6f93e 243 __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
4b1c480b 244#define wake_up_interruptible_sync_poll(x, m) \
ce4dd442 245 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
f94df989
DH
246#define wake_up_interruptible_sync_poll_locked(x, m) \
247 __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
0ccf831c 248
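/*
 * Illustrative sketch (editor's addition): a driver's completion or interrupt
 * path typically reports new events to pollers with a poll key, so that
 * (e)poll waiters only wake for events they asked for. The waitqueue my_wq
 * is hypothetical.
 *
 *	wake_up_interruptible_poll(&my_wq, EPOLLIN | EPOLLRDNORM);
 */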
42288cb4
EB
249/**
250 * wake_up_pollfree - signal that a polled waitqueue is going away
251 * @wq_head: the wait queue head
252 *
253 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
254 * lifetime is tied to a task rather than to the 'struct file' being polled,
255 * this function must be called before the waitqueue is freed so that
256 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
257 *
258 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
259 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
260 */
261static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
262{
263 /*
264 * For performance reasons, we don't always take the queue lock here.
265 * Therefore, we might race with someone removing the last entry from
266 * the queue, and proceed while they still hold the queue lock.
267 * However, rcu_read_lock() is required to be held in such cases, so we
268 * can safely proceed with an RCU-delayed free.
269 */
270 if (waitqueue_active(wq_head))
271 __wake_up_pollfree(wq_head);
272}
273
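/*
 * Illustrative sketch (editor's addition, assumptions noted): a teardown path
 * for a waitqueue whose lifetime is tied to a task might look roughly like
 * this before the containing object is freed. my_obj, my_obj->wq, my_obj->rcu
 * and my_obj_free_rcu() are hypothetical.
 *
 *	wake_up_pollfree(&my_obj->wq);
 *	call_rcu(&my_obj->rcu, my_obj_free_rcu);
 *
 * where my_obj_free_rcu() does the actual kfree(), satisfying the RCU-delay
 * requirement documented above.
 */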
4b1c480b
IM
274#define ___wait_cond_timeout(condition) \
275({ \
276 bool __cond = (condition); \
277 if (__cond && !__ret) \
278 __ret = 1; \
279 __cond || !__ret; \
2953ef24
PZ
280})
281
4b1c480b
IM
282#define ___wait_is_interruptible(state) \
283 (!__builtin_constant_p(state) || \
284 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
41a1431b 285
50816c48 286extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
0176beaf 287
8b32201d
PZ
288/*
289 * The below macro ___wait_event() has an explicit shadow of the __ret
290 * variable when used from the wait_event_*() macros.
291 *
292 * This is so that both can use the ___wait_cond_timeout() construct
293 * to wrap the condition.
294 *
295 * The type inconsistency of the wait_event_*() __ret variable is also
296 * on purpose; we use long where we can return timeout values and int
297 * otherwise.
298 */
299
4b1c480b
IM
300#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
301({ \
302 __label__ __out; \
303 struct wait_queue_entry __wq_entry; \
304 long __ret = ret; /* explicit shadow */ \
305 \
306 init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
307 for (;;) { \
308 long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
309 \
310 if (condition) \
311 break; \
312 \
313 if (___wait_is_interruptible(state) && __int) { \
314 __ret = __int; \
315 goto __out; \
316 } \
317 \
318 cmd; \
319 } \
320 finish_wait(&wq_head, &__wq_entry); \
321__out: __ret; \
35a2af94 322})
41a1431b 323
4b1c480b
IM
324#define __wait_event(wq_head, condition) \
325 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
35a2af94 326 schedule())
1da177e4
LT
327
328/**
329 * wait_event - sleep until a condition gets true
4b1c480b 330 * @wq_head: the waitqueue to wait on
1da177e4
LT
331 * @condition: a C expression for the event to wait for
332 *
333 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
334 * @condition evaluates to true. The @condition is checked each time
4b1c480b 335 * the waitqueue @wq_head is woken up.
1da177e4
LT
336 *
337 * wake_up() has to be called after changing any variable that could
338 * change the result of the wait condition.
339 */
4b1c480b
IM
340#define wait_event(wq_head, condition) \
341do { \
342 might_sleep(); \
343 if (condition) \
344 break; \
345 __wait_event(wq_head, condition); \
1da177e4
LT
346} while (0)
347
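/*
 * Illustrative sketch (editor's addition): the canonical pairing of
 * wait_event() with wake_up(). The waiter sleeps in TASK_UNINTERRUPTIBLE
 * until another context sets the condition and wakes the queue. my_wq and
 * my_done are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_done;
 *
 * Waiter side:
 *	wait_event(my_wq, my_done);
 *
 * Waker side:
 *	my_done = true;
 *	wake_up(&my_wq);
 */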
4b1c480b
IM
348#define __io_wait_event(wq_head, condition) \
349 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
2c561246
PZ
350 io_schedule())
351
352/*
353 * io_wait_event() -- like wait_event() but with io_schedule()
354 */
4b1c480b
IM
355#define io_wait_event(wq_head, condition) \
356do { \
357 might_sleep(); \
358 if (condition) \
359 break; \
360 __io_wait_event(wq_head, condition); \
2c561246
PZ
361} while (0)
362
4b1c480b
IM
363#define __wait_event_freezable(wq_head, condition) \
364 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
2b9c2a48 365 freezable_schedule())
36df04bc
PZ
366
367/**
f4bcfa1d 368 * wait_event_freezable - sleep (or freeze) until a condition gets true
4b1c480b 369 * @wq_head: the waitqueue to wait on
36df04bc
PZ
370 * @condition: a C expression for the event to wait for
371 *
372 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
373 * to system load) until the @condition evaluates to true. The
4b1c480b 374 * @condition is checked each time the waitqueue @wq_head is woken up.
36df04bc
PZ
375 *
376 * wake_up() has to be called after changing any variable that could
377 * change the result of the wait condition.
378 */
4b1c480b
IM
379#define wait_event_freezable(wq_head, condition) \
380({ \
381 int __ret = 0; \
382 might_sleep(); \
383 if (!(condition)) \
384 __ret = __wait_event_freezable(wq_head, condition); \
385 __ret; \
36df04bc
PZ
386})
387
4b1c480b
IM
388#define __wait_event_timeout(wq_head, condition, timeout) \
389 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
390 TASK_UNINTERRUPTIBLE, 0, timeout, \
35a2af94 391 __ret = schedule_timeout(__ret))
1da177e4
LT
392
393/**
394 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
4b1c480b 395 * @wq_head: the waitqueue to wait on
1da177e4
LT
396 * @condition: a C expression for the event to wait for
397 * @timeout: timeout, in jiffies
398 *
399 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
400 * @condition evaluates to true. The @condition is checked each time
4b1c480b 401 * the waitqueue @wq_head is woken up.
1da177e4
LT
402 *
403 * wake_up() has to be called after changing any variable that could
404 * change the result of the wait condition.
405 *
6b44f519
SD
406 * Returns:
407 * 0 if the @condition evaluated to %false after the @timeout elapsed,
408 * 1 if the @condition evaluated to %true after the @timeout elapsed,
409 * or the remaining jiffies (at least 1) if the @condition evaluated
410 * to %true before the @timeout elapsed.
1da177e4 411 */
4b1c480b
IM
412#define wait_event_timeout(wq_head, condition, timeout) \
413({ \
414 long __ret = timeout; \
415 might_sleep(); \
416 if (!___wait_cond_timeout(condition)) \
417 __ret = __wait_event_timeout(wq_head, condition, timeout); \
418 __ret; \
1da177e4
LT
419})
420
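/*
 * Illustrative sketch (editor's addition): handling the documented return
 * values of wait_event_timeout(). A return of 0 means the timeout elapsed
 * with the condition still false; any non-zero return means the condition
 * became true. my_wq and my_done are hypothetical.
 *
 *	long ret = wait_event_timeout(my_wq, my_done, msecs_to_jiffies(100));
 *
 *	if (!ret)
 *		return -ETIMEDOUT;
 */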
4b1c480b
IM
421#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
422 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
423 TASK_INTERRUPTIBLE, 0, timeout, \
2b9c2a48 424 __ret = freezable_schedule_timeout(__ret))
36df04bc
PZ
425
426/*
427 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
428 * increasing load and is freezable.
429 */
4b1c480b
IM
430#define wait_event_freezable_timeout(wq_head, condition, timeout) \
431({ \
432 long __ret = timeout; \
433 might_sleep(); \
434 if (!___wait_cond_timeout(condition)) \
435 __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
436 __ret; \
36df04bc
PZ
437})
438
4b1c480b
IM
439#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
440 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
9f3520c3
YL
441 cmd1; schedule(); cmd2)
442/*
443 * Just like wait_event_cmd(), except it sets exclusive flag
444 */
4b1c480b
IM
445#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
446do { \
447 if (condition) \
448 break; \
449 __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
9f3520c3
YL
450} while (0)
451
4b1c480b
IM
452#define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
453 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
82e06c81
SL
454 cmd1; schedule(); cmd2)
455
456/**
457 * wait_event_cmd - sleep until a condition gets true
4b1c480b 458 * @wq_head: the waitqueue to wait on
82e06c81 459 * @condition: a C expression for the event to wait for
f434f7af
MI
 460 * @cmd1: the command to be executed before sleep
 461 * @cmd2: the command to be executed after sleep
82e06c81
SL
462 *
463 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
464 * @condition evaluates to true. The @condition is checked each time
4b1c480b 465 * the waitqueue @wq_head is woken up.
82e06c81
SL
466 *
467 * wake_up() has to be called after changing any variable that could
468 * change the result of the wait condition.
469 */
4b1c480b
IM
470#define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
471do { \
472 if (condition) \
473 break; \
474 __wait_event_cmd(wq_head, condition, cmd1, cmd2); \
82e06c81
SL
475} while (0)
476
4b1c480b
IM
477#define __wait_event_interruptible(wq_head, condition) \
478 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
f13f4c41 479 schedule())
1da177e4
LT
480
481/**
482 * wait_event_interruptible - sleep until a condition gets true
4b1c480b 483 * @wq_head: the waitqueue to wait on
1da177e4
LT
484 * @condition: a C expression for the event to wait for
485 *
486 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
487 * @condition evaluates to true or a signal is received.
4b1c480b 488 * The @condition is checked each time the waitqueue @wq_head is woken up.
1da177e4
LT
489 *
490 * wake_up() has to be called after changing any variable that could
491 * change the result of the wait condition.
492 *
493 * The function will return -ERESTARTSYS if it was interrupted by a
494 * signal and 0 if @condition evaluated to true.
495 */
4b1c480b
IM
496#define wait_event_interruptible(wq_head, condition) \
497({ \
498 int __ret = 0; \
499 might_sleep(); \
500 if (!(condition)) \
501 __ret = __wait_event_interruptible(wq_head, condition); \
502 __ret; \
1da177e4
LT
503})
504
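/*
 * Illustrative sketch (editor's addition): a typical read/ioctl-style path
 * that propagates the error back to the caller when the sleep is interrupted
 * by a signal. my_wq and my_ready are hypothetical.
 *
 *	static int my_wait_for_data(void)
 *	{
 *		int err = wait_event_interruptible(my_wq, my_ready);
 *
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 *
 * A non-zero return here can only be -ERESTARTSYS, which should be passed
 * back so the signal can be delivered and the syscall restarted.
 */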
4b1c480b
IM
505#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
506 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
507 TASK_INTERRUPTIBLE, 0, timeout, \
35a2af94 508 __ret = schedule_timeout(__ret))
1da177e4
LT
509
510/**
511 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
4b1c480b 512 * @wq_head: the waitqueue to wait on
1da177e4
LT
513 * @condition: a C expression for the event to wait for
514 * @timeout: timeout, in jiffies
515 *
516 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
517 * @condition evaluates to true or a signal is received.
4b1c480b 518 * The @condition is checked each time the waitqueue @wq_head is woken up.
1da177e4
LT
519 *
520 * wake_up() has to be called after changing any variable that could
521 * change the result of the wait condition.
522 *
4c663cfc 523 * Returns:
6b44f519
SD
524 * 0 if the @condition evaluated to %false after the @timeout elapsed,
525 * 1 if the @condition evaluated to %true after the @timeout elapsed,
526 * the remaining jiffies (at least 1) if the @condition evaluated
527 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
528 * interrupted by a signal.
1da177e4 529 */
4b1c480b
IM
530#define wait_event_interruptible_timeout(wq_head, condition, timeout) \
531({ \
532 long __ret = timeout; \
533 might_sleep(); \
534 if (!___wait_cond_timeout(condition)) \
535 __ret = __wait_event_interruptible_timeout(wq_head, \
536 condition, timeout); \
537 __ret; \
1da177e4
LT
538})
539
4b1c480b
IM
540#define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
541({ \
542 int __ret = 0; \
543 struct hrtimer_sleeper __t; \
544 \
dbc1625f
SAS
545 hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
546 HRTIMER_MODE_REL); \
cceeeb6a
JL
547 if ((timeout) != KTIME_MAX) { \
548 hrtimer_set_expires_range_ns(&__t.timer, timeout, \
549 current->timer_slack_ns); \
550 hrtimer_sleeper_start_expires(&__t, HRTIMER_MODE_REL); \
551 } \
4b1c480b
IM
552 \
553 __ret = ___wait_event(wq_head, condition, state, 0, 0, \
554 if (!__t.task) { \
555 __ret = -ETIME; \
556 break; \
557 } \
558 schedule()); \
559 \
560 hrtimer_cancel(&__t.timer); \
561 destroy_hrtimer_on_stack(&__t.timer); \
562 __ret; \
774a08b3
KO
563})
564
565/**
566 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
4b1c480b 567 * @wq_head: the waitqueue to wait on
774a08b3
KO
568 * @condition: a C expression for the event to wait for
569 * @timeout: timeout, as a ktime_t
570 *
571 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
572 * @condition evaluates to true or a signal is received.
4b1c480b 573 * The @condition is checked each time the waitqueue @wq_head is woken up.
774a08b3
KO
574 *
575 * wake_up() has to be called after changing any variable that could
576 * change the result of the wait condition.
577 *
578 * The function returns 0 if @condition became true, or -ETIME if the timeout
579 * elapsed.
580 */
4b1c480b
IM
581#define wait_event_hrtimeout(wq_head, condition, timeout) \
582({ \
583 int __ret = 0; \
584 might_sleep(); \
585 if (!(condition)) \
586 __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
587 TASK_UNINTERRUPTIBLE); \
588 __ret; \
774a08b3
KO
589})
590
591/**
592 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
6c423f57 593 * @wq: the waitqueue to wait on
774a08b3
KO
594 * @condition: a C expression for the event to wait for
595 * @timeout: timeout, as a ktime_t
596 *
597 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
598 * @condition evaluates to true or a signal is received.
6c423f57 599 * The @condition is checked each time the waitqueue @wq is woken up.
774a08b3
KO
600 *
601 * wake_up() has to be called after changing any variable that could
602 * change the result of the wait condition.
603 *
604 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
605 * interrupted by a signal, or -ETIME if the timeout elapsed.
606 */
4b1c480b
IM
607#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
608({ \
609 long __ret = 0; \
610 might_sleep(); \
611 if (!(condition)) \
612 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
613 TASK_INTERRUPTIBLE); \
614 __ret; \
774a08b3
KO
615})
616
4b1c480b
IM
617#define __wait_event_interruptible_exclusive(wq, condition) \
618 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
48c25217 619 schedule())
1da177e4 620
4b1c480b
IM
621#define wait_event_interruptible_exclusive(wq, condition) \
622({ \
623 int __ret = 0; \
624 might_sleep(); \
625 if (!(condition)) \
626 __ret = __wait_event_interruptible_exclusive(wq, condition); \
627 __ret; \
1da177e4
LT
628})
629
4b1c480b
IM
630#define __wait_event_killable_exclusive(wq, condition) \
631 ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
6a0fb306
AV
632 schedule())
633
4b1c480b
IM
634#define wait_event_killable_exclusive(wq, condition) \
635({ \
636 int __ret = 0; \
637 might_sleep(); \
638 if (!(condition)) \
639 __ret = __wait_event_killable_exclusive(wq, condition); \
640 __ret; \
6a0fb306
AV
641})
642
22c43c81 643
4b1c480b
IM
644#define __wait_event_freezable_exclusive(wq, condition) \
645 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
2b9c2a48 646 freezable_schedule())
36df04bc 647
4b1c480b
IM
648#define wait_event_freezable_exclusive(wq, condition) \
649({ \
650 int __ret = 0; \
651 might_sleep(); \
652 if (!(condition)) \
653 __ret = __wait_event_freezable_exclusive(wq, condition); \
654 __ret; \
36df04bc
PZ
655})
656
0957a2c1
N
657/**
658 * wait_event_idle - wait for a condition without contributing to system load
659 * @wq_head: the waitqueue to wait on
660 * @condition: a C expression for the event to wait for
661 *
662 * The process is put to sleep (TASK_IDLE) until the
663 * @condition evaluates to true.
664 * The @condition is checked each time the waitqueue @wq_head is woken up.
665 *
666 * wake_up() has to be called after changing any variable that could
667 * change the result of the wait condition.
668 *
669 */
670#define wait_event_idle(wq_head, condition) \
671do { \
672 might_sleep(); \
673 if (!(condition)) \
674 ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
675} while (0)
676
677/**
 678 * wait_event_idle_exclusive - wait for a condition without contributing to system load
679 * @wq_head: the waitqueue to wait on
680 * @condition: a C expression for the event to wait for
681 *
682 * The process is put to sleep (TASK_IDLE) until the
683 * @condition evaluates to true.
684 * The @condition is checked each time the waitqueue @wq_head is woken up.
685 *
 686 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 687 * set, so if other processes also wait on the same list, no further
 688 * processes are considered once this process has been woken.
689 *
690 * wake_up() has to be called after changing any variable that could
691 * change the result of the wait condition.
692 *
693 */
694#define wait_event_idle_exclusive(wq_head, condition) \
695do { \
696 might_sleep(); \
697 if (!(condition)) \
698 ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
699} while (0)
700
701#define __wait_event_idle_timeout(wq_head, condition, timeout) \
702 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
703 TASK_IDLE, 0, timeout, \
704 __ret = schedule_timeout(__ret))
705
706/**
707 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
708 * @wq_head: the waitqueue to wait on
709 * @condition: a C expression for the event to wait for
710 * @timeout: timeout, in jiffies
711 *
712 * The process is put to sleep (TASK_IDLE) until the
713 * @condition evaluates to true. The @condition is checked each time
714 * the waitqueue @wq_head is woken up.
715 *
716 * wake_up() has to be called after changing any variable that could
717 * change the result of the wait condition.
718 *
719 * Returns:
720 * 0 if the @condition evaluated to %false after the @timeout elapsed,
721 * 1 if the @condition evaluated to %true after the @timeout elapsed,
722 * or the remaining jiffies (at least 1) if the @condition evaluated
723 * to %true before the @timeout elapsed.
724 */
725#define wait_event_idle_timeout(wq_head, condition, timeout) \
726({ \
727 long __ret = timeout; \
728 might_sleep(); \
729 if (!___wait_cond_timeout(condition)) \
730 __ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
731 __ret; \
732})
733
734#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
735 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
736 TASK_IDLE, 1, timeout, \
737 __ret = schedule_timeout(__ret))
738
739/**
740 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
741 * @wq_head: the waitqueue to wait on
742 * @condition: a C expression for the event to wait for
743 * @timeout: timeout, in jiffies
744 *
745 * The process is put to sleep (TASK_IDLE) until the
746 * @condition evaluates to true. The @condition is checked each time
747 * the waitqueue @wq_head is woken up.
748 *
 749 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 750 * set, so if other processes also wait on the same list, no further
 751 * processes are considered once this process has been woken.
752 *
753 * wake_up() has to be called after changing any variable that could
754 * change the result of the wait condition.
755 *
756 * Returns:
757 * 0 if the @condition evaluated to %false after the @timeout elapsed,
758 * 1 if the @condition evaluated to %true after the @timeout elapsed,
759 * or the remaining jiffies (at least 1) if the @condition evaluated
760 * to %true before the @timeout elapsed.
761 */
762#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
763({ \
764 long __ret = timeout; \
765 might_sleep(); \
766 if (!___wait_cond_timeout(condition)) \
767 __ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
768 __ret; \
769})
770
ac6424b9
IM
771extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
772extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
36df04bc 773
4b1c480b
IM
774#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
775({ \
776 int __ret; \
777 DEFINE_WAIT(__wait); \
778 if (exclusive) \
779 __wait.flags |= WQ_FLAG_EXCLUSIVE; \
780 do { \
781 __ret = fn(&(wq), &__wait); \
782 if (__ret) \
783 break; \
784 } while (!(condition)); \
785 __remove_wait_queue(&(wq), &__wait); \
786 __set_current_state(TASK_RUNNING); \
787 __ret; \
22c43c81
MN
788})
789
790
791/**
792 * wait_event_interruptible_locked - sleep until a condition gets true
793 * @wq: the waitqueue to wait on
794 * @condition: a C expression for the event to wait for
795 *
796 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
797 * @condition evaluates to true or a signal is received.
798 * The @condition is checked each time the waitqueue @wq is woken up.
799 *
800 * It must be called with wq.lock being held. This spinlock is
801 * unlocked while sleeping but @condition testing is done while lock
802 * is held and when this macro exits the lock is held.
803 *
804 * The lock is locked/unlocked using spin_lock()/spin_unlock()
805 * functions which must match the way they are locked/unlocked outside
806 * of this macro.
807 *
808 * wake_up_locked() has to be called after changing any variable that could
809 * change the result of the wait condition.
810 *
811 * The function will return -ERESTARTSYS if it was interrupted by a
812 * signal and 0 if @condition evaluated to true.
813 */
4b1c480b
IM
814#define wait_event_interruptible_locked(wq, condition) \
815 ((condition) \
bd0f9b35 816 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
22c43c81
MN
817
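/*
 * Illustrative sketch (editor's addition, assumptions noted): the _locked
 * variants are entered with wq.lock held and return with it held; the lock is
 * only dropped across the actual sleep. my_wq and my_count are hypothetical.
 *
 *	spin_lock(&my_wq.lock);
 *	err = wait_event_interruptible_locked(my_wq, my_count > 0);
 *	if (!err)
 *		my_count--;
 *	spin_unlock(&my_wq.lock);
 *
 * The matching waker updates my_count and calls wake_up_locked(&my_wq) while
 * holding the same lock.
 */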
818/**
819 * wait_event_interruptible_locked_irq - sleep until a condition gets true
820 * @wq: the waitqueue to wait on
821 * @condition: a C expression for the event to wait for
822 *
823 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
824 * @condition evaluates to true or a signal is received.
825 * The @condition is checked each time the waitqueue @wq is woken up.
826 *
827 * It must be called with wq.lock being held. This spinlock is
828 * unlocked while sleeping but @condition testing is done while lock
829 * is held and when this macro exits the lock is held.
830 *
831 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
832 * functions which must match the way they are locked/unlocked outside
833 * of this macro.
834 *
835 * wake_up_locked() has to be called after changing any variable that could
836 * change the result of the wait condition.
837 *
838 * The function will return -ERESTARTSYS if it was interrupted by a
839 * signal and 0 if @condition evaluated to true.
840 */
4b1c480b
IM
841#define wait_event_interruptible_locked_irq(wq, condition) \
842 ((condition) \
bd0f9b35 843 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
22c43c81
MN
844
845/**
846 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
847 * @wq: the waitqueue to wait on
848 * @condition: a C expression for the event to wait for
849 *
850 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
851 * @condition evaluates to true or a signal is received.
852 * The @condition is checked each time the waitqueue @wq is woken up.
853 *
854 * It must be called with wq.lock being held. This spinlock is
855 * unlocked while sleeping but @condition testing is done while lock
856 * is held and when this macro exits the lock is held.
857 *
858 * The lock is locked/unlocked using spin_lock()/spin_unlock()
859 * functions which must match the way they are locked/unlocked outside
860 * of this macro.
861 *
 862 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 863 * set, so if other processes also wait on the same list, no further
 864 * processes are considered once this process has been woken.
865 *
866 * wake_up_locked() has to be called after changing any variable that could
867 * change the result of the wait condition.
868 *
869 * The function will return -ERESTARTSYS if it was interrupted by a
870 * signal and 0 if @condition evaluated to true.
871 */
4b1c480b
IM
872#define wait_event_interruptible_exclusive_locked(wq, condition) \
873 ((condition) \
bd0f9b35 874 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
22c43c81
MN
875
876/**
877 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
878 * @wq: the waitqueue to wait on
879 * @condition: a C expression for the event to wait for
880 *
881 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
882 * @condition evaluates to true or a signal is received.
883 * The @condition is checked each time the waitqueue @wq is woken up.
884 *
885 * It must be called with wq.lock being held. This spinlock is
886 * unlocked while sleeping but @condition testing is done while lock
887 * is held and when this macro exits the lock is held.
888 *
889 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
890 * functions which must match the way they are locked/unlocked outside
891 * of this macro.
892 *
 893 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 894 * set, so if other processes also wait on the same list, no further
 895 * processes are considered once this process has been woken.
896 *
897 * wake_up_locked() has to be called after changing any variable that could
898 * change the result of the wait condition.
899 *
900 * The function will return -ERESTARTSYS if it was interrupted by a
901 * signal and 0 if @condition evaluated to true.
902 */
4b1c480b
IM
903#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
904 ((condition) \
bd0f9b35 905 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
22c43c81
MN
906
907
4b1c480b 908#define __wait_event_killable(wq, condition) \
35a2af94 909 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
1411d5a7
MW
910
911/**
912 * wait_event_killable - sleep until a condition gets true
6c423f57 913 * @wq_head: the waitqueue to wait on
1411d5a7
MW
914 * @condition: a C expression for the event to wait for
915 *
916 * The process is put to sleep (TASK_KILLABLE) until the
917 * @condition evaluates to true or a signal is received.
6c423f57 918 * The @condition is checked each time the waitqueue @wq_head is woken up.
1411d5a7
MW
919 *
920 * wake_up() has to be called after changing any variable that could
921 * change the result of the wait condition.
922 *
923 * The function will return -ERESTARTSYS if it was interrupted by a
924 * signal and 0 if @condition evaluated to true.
925 */
4b1c480b
IM
926#define wait_event_killable(wq_head, condition) \
927({ \
928 int __ret = 0; \
929 might_sleep(); \
930 if (!(condition)) \
931 __ret = __wait_event_killable(wq_head, condition); \
932 __ret; \
1411d5a7
MW
933})
934
8ada9279
LR
935#define __wait_event_killable_timeout(wq_head, condition, timeout) \
936 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
937 TASK_KILLABLE, 0, timeout, \
938 __ret = schedule_timeout(__ret))
939
940/**
941 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
942 * @wq_head: the waitqueue to wait on
943 * @condition: a C expression for the event to wait for
944 * @timeout: timeout, in jiffies
945 *
946 * The process is put to sleep (TASK_KILLABLE) until the
947 * @condition evaluates to true or a kill signal is received.
948 * The @condition is checked each time the waitqueue @wq_head is woken up.
949 *
950 * wake_up() has to be called after changing any variable that could
951 * change the result of the wait condition.
952 *
953 * Returns:
954 * 0 if the @condition evaluated to %false after the @timeout elapsed,
955 * 1 if the @condition evaluated to %true after the @timeout elapsed,
956 * the remaining jiffies (at least 1) if the @condition evaluated
957 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
958 * interrupted by a kill signal.
959 *
960 * Only kill signals interrupt this process.
961 */
962#define wait_event_killable_timeout(wq_head, condition, timeout) \
963({ \
964 long __ret = timeout; \
965 might_sleep(); \
966 if (!___wait_cond_timeout(condition)) \
967 __ret = __wait_event_killable_timeout(wq_head, \
968 condition, timeout); \
969 __ret; \
970})
971
eed8c02e 972
4b1c480b
IM
973#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
974 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
975 spin_unlock_irq(&lock); \
976 cmd; \
977 schedule(); \
35a2af94 978 spin_lock_irq(&lock))
eed8c02e
LC
979
980/**
981 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
982 * condition is checked under the lock. This
983 * is expected to be called with the lock
984 * taken.
4b1c480b 985 * @wq_head: the waitqueue to wait on
eed8c02e
LC
986 * @condition: a C expression for the event to wait for
987 * @lock: a locked spinlock_t, which will be released before cmd
988 * and schedule() and reacquired afterwards.
989 * @cmd: a command which is invoked outside the critical section before
990 * sleep
991 *
992 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
993 * @condition evaluates to true. The @condition is checked each time
4b1c480b 994 * the waitqueue @wq_head is woken up.
eed8c02e
LC
995 *
996 * wake_up() has to be called after changing any variable that could
997 * change the result of the wait condition.
998 *
999 * This is supposed to be called while holding the lock. The lock is
1000 * dropped before invoking the cmd and going to sleep and is reacquired
1001 * afterwards.
1002 */
4b1c480b
IM
1003#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
1004do { \
1005 if (condition) \
1006 break; \
1007 __wait_event_lock_irq(wq_head, condition, lock, cmd); \
eed8c02e
LC
1008} while (0)
1009
1010/**
1011 * wait_event_lock_irq - sleep until a condition gets true. The
1012 * condition is checked under the lock. This
1013 * is expected to be called with the lock
1014 * taken.
4b1c480b 1015 * @wq_head: the waitqueue to wait on
eed8c02e
LC
1016 * @condition: a C expression for the event to wait for
1017 * @lock: a locked spinlock_t, which will be released before schedule()
1018 * and reacquired afterwards.
1019 *
1020 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
1021 * @condition evaluates to true. The @condition is checked each time
4b1c480b 1022 * the waitqueue @wq_head is woken up.
eed8c02e
LC
1023 *
1024 * wake_up() has to be called after changing any variable that could
1025 * change the result of the wait condition.
1026 *
1027 * This is supposed to be called while holding the lock. The lock is
1028 * dropped before going to sleep and is reacquired afterwards.
1029 */
4b1c480b
IM
1030#define wait_event_lock_irq(wq_head, condition, lock) \
1031do { \
1032 if (condition) \
1033 break; \
1034 __wait_event_lock_irq(wq_head, condition, lock, ); \
eed8c02e
LC
1035} while (0)
1036
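/*
 * Illustrative sketch (editor's addition): waiting for a condition protected
 * by a separate spinlock taken with spin_lock_irq(). The macro drops the lock
 * around schedule() and reacquires it, so the condition is always tested with
 * the lock held. my_wq, my_lock and my_list are hypothetical; note the lock
 * is passed by name, not by address.
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, !list_empty(&my_list), my_lock);
 *	... my_list is non-empty and my_lock is still held ...
 *	spin_unlock_irq(&my_lock);
 */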
1037
4b1c480b
IM
1038#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
1039 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
1040 spin_unlock_irq(&lock); \
1041 cmd; \
1042 schedule(); \
8fbd88fa 1043 spin_lock_irq(&lock))
eed8c02e
LC
1044
1045/**
1046 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
1047 * The condition is checked under the lock. This is expected to
1048 * be called with the lock taken.
4b1c480b 1049 * @wq_head: the waitqueue to wait on
eed8c02e
LC
1050 * @condition: a C expression for the event to wait for
1051 * @lock: a locked spinlock_t, which will be released before cmd and
1052 * schedule() and reacquired afterwards.
1053 * @cmd: a command which is invoked outside the critical section before
1054 * sleep
1055 *
1056 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1057 * @condition evaluates to true or a signal is received. The @condition is
4b1c480b 1058 * checked each time the waitqueue @wq_head is woken up.
eed8c02e
LC
1059 *
1060 * wake_up() has to be called after changing any variable that could
1061 * change the result of the wait condition.
1062 *
1063 * This is supposed to be called while holding the lock. The lock is
1064 * dropped before invoking the cmd and going to sleep and is reacquired
1065 * afterwards.
1066 *
1067 * The macro will return -ERESTARTSYS if it was interrupted by a signal
1068 * and 0 if @condition evaluated to true.
1069 */
4b1c480b
IM
1070#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
1071({ \
1072 int __ret = 0; \
1073 if (!(condition)) \
1074 __ret = __wait_event_interruptible_lock_irq(wq_head, \
1075 condition, lock, cmd); \
1076 __ret; \
eed8c02e
LC
1077})
1078
1079/**
1080 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
1081 * The condition is checked under the lock. This is expected
1082 * to be called with the lock taken.
4b1c480b 1083 * @wq_head: the waitqueue to wait on
eed8c02e
LC
1084 * @condition: a C expression for the event to wait for
1085 * @lock: a locked spinlock_t, which will be released before schedule()
1086 * and reacquired afterwards.
1087 *
1088 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1089 * @condition evaluates to true or signal is received. The @condition is
4b1c480b 1090 * checked each time the waitqueue @wq_head is woken up.
eed8c02e
LC
1091 *
1092 * wake_up() has to be called after changing any variable that could
1093 * change the result of the wait condition.
1094 *
1095 * This is supposed to be called while holding the lock. The lock is
1096 * dropped before going to sleep and is reacquired afterwards.
1097 *
1098 * The macro will return -ERESTARTSYS if it was interrupted by a signal
1099 * and 0 if @condition evaluated to true.
1100 */
4b1c480b
IM
1101#define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
1102({ \
1103 int __ret = 0; \
1104 if (!(condition)) \
1105 __ret = __wait_event_interruptible_lock_irq(wq_head, \
1106 condition, lock,); \
1107 __ret; \
eed8c02e
LC
1108})
1109
25ab0bc3 1110#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
4b1c480b 1111 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
25ab0bc3 1112 state, 0, timeout, \
4b1c480b
IM
1113 spin_unlock_irq(&lock); \
1114 __ret = schedule_timeout(__ret); \
a1dc6852 1115 spin_lock_irq(&lock));
d79ff142
MP
1116
1117/**
fb869b6e
IM
1118 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
1119 * true or a timeout elapses. The condition is checked under
1120 * the lock. This is expected to be called with the lock taken.
4b1c480b 1121 * @wq_head: the waitqueue to wait on
d79ff142
MP
1122 * @condition: a C expression for the event to wait for
1123 * @lock: a locked spinlock_t, which will be released before schedule()
1124 * and reacquired afterwards.
1125 * @timeout: timeout, in jiffies
1126 *
1127 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
1128 * @condition evaluates to true or signal is received. The @condition is
4b1c480b 1129 * checked each time the waitqueue @wq_head is woken up.
d79ff142
MP
1130 *
1131 * wake_up() has to be called after changing any variable that could
1132 * change the result of the wait condition.
1133 *
1134 * This is supposed to be called while holding the lock. The lock is
1135 * dropped before going to sleep and is reacquired afterwards.
1136 *
 1137 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 1138 * was interrupted by a signal, or the remaining jiffies if the
 1139 * @condition evaluated to true before the @timeout elapsed.
1140 */
4b1c480b
IM
1141#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
1142 timeout) \
1143({ \
1144 long __ret = timeout; \
1145 if (!___wait_cond_timeout(condition)) \
25ab0bc3
NB
1146 __ret = __wait_event_lock_irq_timeout( \
1147 wq_head, condition, lock, timeout, \
1148 TASK_INTERRUPTIBLE); \
1149 __ret; \
1150})
1151
1152#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
1153({ \
1154 long __ret = timeout; \
1155 if (!___wait_cond_timeout(condition)) \
1156 __ret = __wait_event_lock_irq_timeout( \
1157 wq_head, condition, lock, timeout, \
1158 TASK_UNINTERRUPTIBLE); \
4b1c480b 1159 __ret; \
d79ff142
MP
1160})
1161
1da177e4
LT
1162/*
1163 * Waitqueues which are removed from the waitqueue_head at wakeup time
1164 */
9d9d676f 1165void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
11c7aa0d 1166bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
9d9d676f
IM
1167long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
1168void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
50816c48
IM
1169long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
1170int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1171int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1da177e4 1172
4b1c480b
IM
1173#define DEFINE_WAIT_FUNC(name, function) \
1174 struct wait_queue_entry name = { \
1175 .private = current, \
1176 .func = function, \
2055da97 1177 .entry = LIST_HEAD_INIT((name).entry), \
1da177e4
LT
1178 }
1179
bf368e4e
ED
1180#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1181
4b1c480b
IM
1182#define init_wait(wait) \
1183 do { \
1184 (wait)->private = current; \
1185 (wait)->func = autoremove_wake_function; \
2055da97 1186 INIT_LIST_HEAD(&(wait)->entry); \
4b1c480b 1187 (wait)->flags = 0; \
1da177e4
LT
1188 } while (0)
1189
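/*
 * Illustrative sketch (editor's addition): the open-coded equivalent of
 * wait_event(), using DEFINE_WAIT() together with prepare_to_wait() and
 * finish_wait(). This form is useful when extra work must happen between
 * the condition check and schedule(). my_wq and my_cond are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */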
9b3c4ab3
PZ
1190typedef int (*task_call_f)(struct task_struct *p, void *arg);
1191extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);
2beaf328 1192
fb869b6e 1193#endif /* _LINUX_WAIT_H */