#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

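/*
 * Balancing contexts handed to the scheduler's load-balancing code:
 * CPU_NEWLY_IDLE marks a balance attempt by a CPU that has just gone
 * idle, as opposed to the periodic CPU_IDLE / CPU_NOT_IDLE passes.
 */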
enum cpu_idle_type {
        CPU_IDLE,
        CPU_NOT_IDLE,
        CPU_NEWLY_IDLE,
        CPU_MAX_IDLE_TYPES
};

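/* Defined in kernel/sched/core.c: kick @cpu, but only if it is idle. */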
extern void wake_up_if_idle(int cpu);

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

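/*
 * Note: architectures whose idle loop can poll on need_resched define
 * TIF_POLLING_NRFLAG. While the flag is set, resched_curr() can skip
 * the resched IPI and merely set TIF_NEED_RESCHED, which the polling
 * idle CPU will notice on its own.
 */
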
static inline void __current_set_polling(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);
}

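/*
 * Returns true if a reschedule was already pending once the polling bit
 * became visible; the caller should then skip entering an idle state.
 */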
static inline bool __must_check current_set_polling_and_test(void)
{
        __current_set_polling();

        /*
         * Polling state must be visible before we test NEED_RESCHED,
         * paired with resched_curr().
         */
        smp_mb__after_atomic();

        return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
        clear_thread_flag(TIF_POLLING_NRFLAG);
}

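/*
 * Used before entering an idle state in which the CPU stops watching
 * need_resched (see cpuidle_idle_call() in kernel/sched/idle.c); once
 * polling is off, waking this CPU requires the resched IPI again.
 */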
static inline bool __must_check current_clr_polling_and_test(void)
{
        __current_clr_polling();

        /*
         * Polling state must be visible before we test NEED_RESCHED,
         * paired with resched_curr().
         */
        smp_mb__after_atomic();

        return unlikely(tif_need_resched());
}

#else
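/*
 * Without TIF_POLLING_NRFLAG there is no polling state to maintain:
 * waking an idle CPU always takes the resched IPI, so only the
 * NEED_RESCHED test remains and the set/clear helpers are no-ops.
 */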
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
        return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
        return unlikely(tif_need_resched());
}
#endif

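/*
 * Unconditional variant for the idle-exit path: drop polling (if any)
 * and make sure a pending need_resched is acted upon before returning
 * to the scheduler.
 */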
static inline void current_clr_polling(void)
{
        __current_clr_polling();

        /*
         * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
         * Once the bit is cleared, we'll get IPIs with every new
         * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
         * fold it into the preempt count.
         */
        smp_mb(); /* paired with resched_curr() */

        preempt_fold_need_resched();
}

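/*
 * Usage sketch (illustrative only, not part of this header): an idle
 * poll loop in the spirit of kernel/sched/idle.c would combine the
 * helpers above roughly like this; idle_poll_loop() is a hypothetical
 * name, not an existing kernel function.
 *
 *	static void idle_poll_loop(void)
 *	{
 *		if (!current_set_polling_and_test()) {
 *			// Remote CPUs now just set TIF_NEED_RESCHED
 *			// without an IPI; spin until that happens.
 *			while (!tif_need_resched())
 *				cpu_relax();
 *		}
 *		// Leave polling mode and fold any pending reschedule.
 *		current_clr_polling();
 *	}
 */
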
#endif /* _LINUX_SCHED_IDLE_H */