// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

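/*
 * A sketch of the "serialize itself by its own spinlocks" rule above
 * (hypothetical handler and lock, not part of this file): the handler
 * guards its shared data with a handler-private lock rather than any
 * global one. Bottom halves are already off on the local CPU while the
 * handler runs, so the lock only serializes against other CPUs:
 *
 *	static DEFINE_SPINLOCK(my_queue_lock);
 *
 *	static void my_action(struct softirq_action *h)
 *	{
 *		spin_lock(&my_queue_lock);
 *		// drain a queue shared with other CPUs
 *		spin_unlock(&my_queue_lock);
 *	}
 */
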
#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING) &&
		!__kthread_should_park(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

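/*
 * A worked example (assuming the generic preempt_count layout, where
 * SOFTIRQ_OFFSET is 0x100): while a softirq handler runs,
 * softirq_count() == 0x100 and in_serving_softirq() is true; a task
 * that has merely called local_bh_disable() instead shows
 * softirq_count() == 0x200 (SOFTIRQ_DISABLE_OFFSET) and is bh-disabled
 * without being in softirq context.
 */
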
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

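/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * local_bh_disable()/local_bh_enable() bracket a section that must not
 * race with softirq handlers on this CPU, e.g. per-CPU data that a
 * NET_RX handler also touches. Note that local_bh_enable() may end up
 * in do_softirq() if work became pending meanwhile:
 *
 *	local_bh_disable();
 *	this_cpu_inc(my_rx_drops);	// "my_rx_drops" is illustrative only
 *	local_bh_enable();
 */
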
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not miss-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		lockdep_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	if (__this_cpu_read(ksoftirqd) == current)
		rcu_softirq_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

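/*
 * A worked example of the ffs() walk above: with pending == 0x0a
 * (TIMER_SOFTIRQ and NET_RX_SOFTIRQ raised), ffs() returns 2, so h
 * advances to softirq_vec[1] (TIMER) and pending is shifted right by 2
 * to 0x02; the next ffs() again returns 2, landing on softirq_vec[3]
 * (NET_RX), after which pending becomes 0 and the loop terminates.
 */
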
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	/* must be last! */
	lockdep_hardirq_exit();
}

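/*
 * A minimal sketch of the expected pairing (hypothetical arch-level
 * code, not part of this file): low-level interrupt entry brackets the
 * generic handler with irq_enter()/irq_exit(), and irq_exit() decides
 * whether pending softirqs run now or get deferred to ksoftirqd:
 *
 *	irq_enter();
 *	generic_handle_irq(irq);	// dispatch to the driver's handler
 *	irq_exit();			// may call invoke_softirq()
 */
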
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

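/*
 * A usage sketch (names are illustrative, not from this file): a
 * subsystem registers its handler once at init time with open_softirq()
 * and later marks it pending with raise_softirq(); the handler then
 * runs from __do_softirq() with bottom halves disabled:
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// drain per-CPU work here
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// MY_SOFTIRQ: hypothetical slot below NR_SOFTIRQS
 *	raise_softirq(MY_SOFTIRQ);			// e.g. from an interrupt handler
 */
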
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

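/*
 * A usage sketch (hypothetical driver names): initialize once, schedule
 * from hard-irq context, and kill on teardown. tasklet_schedule(), from
 * <linux/interrupt.h>, is the wrapper around __tasklet_schedule() above:
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// deferred work runs here in softirq context
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	tasklet_schedule(&dev->tasklet);	// typically from the irq handler
 *	tasklet_kill(&dev->tasklet);		// on device removal
 */
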
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}