#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can in theory reach the same as NR_IRQS.
 * In reality, the number of nested IRQs is limited to the stack
 * size as well. For archs with over 1000 IRQs it is not practical
 * to expect that they will all nest. We give a max of 10 bits for
 * hardirq nesting. An arch may choose to give less than 10 bits.
 * m68k expects it to be 8.
 *
 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
 * - bit 26 is the NMI_MASK
 * - bit 27 is the PREEMPT_ACTIVE flag (per the generic fallback below;
 *   an arch-provided PREEMPT_ACTIVE may sit higher, e.g. at bit 28)
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x03ff0000
 *     NMI_MASK: 0x04000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define NMI_BITS	1

#define MAX_HARDIRQ_BITS 10

#ifndef HARDIRQ_BITS
# define HARDIRQ_BITS	MAX_HARDIRQ_BITS
#endif

#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
#error HARDIRQ_BITS too high!
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

#ifndef PREEMPT_ACTIVE
#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
#endif

#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 * in_softirq - Are we currently processing softirq or have bh disabled?
 * in_serving_softirq - Are we currently processing softirq?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
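
/*
 * Illustrative sketch (hypothetical helper): a common use of the context
 * tests above is choosing a safe allocation mode, since GFP_KERNEL may
 * sleep and must not be used from interrupt context:
 */
#if 0
static void *example_alloc(size_t size)
{
	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(size, flags);
}
#endif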

/*
 * Are we in NMI context?
 */
#define in_nmi()	(preempt_count() & NMI_MASK)

#if defined(CONFIG_PREEMPT)
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
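
/*
 * Illustrative sketch of the warning above: on a !CONFIG_PREEMPT kernel
 * spin_lock() does not touch preempt_count, so in_atomic() reads 0 here
 * even though sleeping under the lock would be a bug:
 */
#if 0
static DEFINE_SPINLOCK(example_lock);

static void example_false_negative(void)
{
	spin_lock(&example_lock);
	/* in_atomic() == 0 on !CONFIG_PREEMPT, yet we must not sleep */
	spin_unlock(&example_lock);
}
#endif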

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler, *after* releasing the kernel lock)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif

struct task_struct;

#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#else
extern void account_system_vtime(struct task_struct *tsk);
#endif

#if defined(CONFIG_NO_HZ)
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
extern void rcu_enter_nohz(void);
extern void rcu_exit_nohz(void);

static inline void rcu_irq_enter(void)
{
	/* An interrupt brings a dynticks-idle CPU out of nohz state. */
	rcu_exit_nohz();
}

static inline void rcu_irq_exit(void)
{
	/* On irq exit the CPU may re-enter nohz state. */
	rcu_enter_nohz();
}

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif
#else
# define rcu_irq_enter() do { } while (0)
# define rcu_irq_exit() do { } while (0)
# define rcu_nmi_enter() do { } while (0)
# define rcu_nmi_exit() do { } while (0)
#endif /* #if defined(CONFIG_NO_HZ) */
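
/*
 * Note: rcu_nmi_enter()/rcu_nmi_exit() are invoked from nmi_enter()/
 * nmi_exit() below; the irq-side hooks are likewise invoked on the
 * irq_enter()/irq_exit() paths so that RCU notices CPUs leaving and
 * re-entering dynticks-idle state.
 */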

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		lockdep_off();					\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		lockdep_on();					\
		BUG_ON(!in_nmi());				\
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
	} while (0)
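
/*
 * Illustrative sketch (hypothetical arch NMI entry): every NMI handler
 * must be bracketed by nmi_enter()/nmi_exit() so that in_nmi() and
 * lockdep see the correct context:
 */
#if 0
void example_do_nmi(struct pt_regs *regs)
{
	nmi_enter();
	/* handle the NMI; must not sleep or take sleeping locks */
	nmi_exit();
}
#endif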

#endif /* LINUX_HARDIRQ_H */