1 #ifndef _LINUX_SCHED_IDLE_H
2 #define _LINUX_SCHED_IDLE_H
4 #include <linux/sched.h>
13 extern void wake_up_if_idle(int cpu);
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
19 #ifdef TIF_POLLING_NRFLAG
21 static inline void __current_set_polling(void)
23 set_thread_flag(TIF_POLLING_NRFLAG);
26 static inline bool __must_check current_set_polling_and_test(void)
28 __current_set_polling();
31 * Polling state must be visible before we test NEED_RESCHED,
32 * paired by resched_curr()
34 smp_mb__after_atomic();
36 return unlikely(tif_need_resched());
39 static inline void __current_clr_polling(void)
41 clear_thread_flag(TIF_POLLING_NRFLAG);
44 static inline bool __must_check current_clr_polling_and_test(void)
46 __current_clr_polling();
49 * Polling state must be visible before we test NEED_RESCHED,
50 * paired by resched_curr()
52 smp_mb__after_atomic();
54 return unlikely(tif_need_resched());
58 static inline void __current_set_polling(void) { }
59 static inline void __current_clr_polling(void) { }
61 static inline bool __must_check current_set_polling_and_test(void)
63 return unlikely(tif_need_resched());
65 static inline bool __must_check current_clr_polling_and_test(void)
67 return unlikely(tif_need_resched());
/*
 * Clear the polling flag and fold any pending TIF_NEED_RESCHED into the
 * preempt count so a later preempt_enable() reschedules if required.
 */
static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
86 #endif /* _LINUX_SCHED_IDLE_H */