        int cpu = smp_processor_id();
        rcu_irq_enter();
-        if (idle_cpu(cpu) && !in_interrupt()) {
+        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
        __irq_enter();
}
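
The new check asks whether the interrupted task is the idle task itself, rather than whether the CPU currently looks idle to the scheduler; idle_cpu() also reports non-idle once a wakeup has been queued on the CPU, even while the idle task is still running. For orientation, a minimal sketch of what such a predicate amounts to is below; this is an assumption for illustration, not a quote of the helper in include/linux/sched.h:

/* Sketch only: the idle task is the per-CPU swapper task with PID 0. */
static inline bool is_idle_task(const struct task_struct *p)
{
        return p->pid == 0;
}
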
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
static inline void invoke_softirq(void)
{
-        if (!force_irqthreads)
+        if (!force_irqthreads) {
+#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
                __do_softirq();
-        else {
-                __local_bh_disable((unsigned long)__builtin_return_address(0),
-                                SOFTIRQ_OFFSET);
-                wakeup_softirqd();
-                __local_bh_enable(SOFTIRQ_OFFSET);
-        }
-}
#else
-static inline void invoke_softirq(void)
-{
-        if (!force_irqthreads)
                do_softirq();
-        else {
+#endif
+        } else {
                __local_bh_disable((unsigned long)__builtin_return_address(0),
                                SOFTIRQ_OFFSET);
                wakeup_softirqd();
                __local_bh_enable(SOFTIRQ_OFFSET);
        }
}
-#endif
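
Read as a whole, the hunk above collapses the two #ifdef'ed copies of invoke_softirq() into one. After the change the function reads roughly as follows (reconstructed from the hunk, indentation approximate):

static inline void invoke_softirq(void)
{
        if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
                __do_softirq();
#else
                do_softirq();
#endif
        } else {
                __local_bh_disable((unsigned long)__builtin_return_address(0),
                                SOFTIRQ_OFFSET);
                wakeup_softirqd();
                __local_bh_enable(SOFTIRQ_OFFSET);
        }
}

Only the architecture-dependent choice between __do_softirq() and do_softirq() stays under the #ifdef; the force_irqthreads path that wakes ksoftirqd is now written once.
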
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
        tick_nohz_irq_exit();
#endif
        rcu_irq_exit();
-        preempt_enable_no_resched();
+        sched_preempt_enable_no_resched();
}
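
Here preempt_enable_no_resched() becomes sched_preempt_enable_no_resched(). Assuming the usual definitions in include/linux/preempt.h, both boil down to dropping the preempt count without checking for a pending reschedule, so this reads as a switch to the scheduler-sanctioned name rather than a behavioural change. A sketch of that assumption:

/* Assumption, not taken from this patch: the helper only decrements
 * the preempt count and deliberately skips the need_resched() check. */
#define sched_preempt_enable_no_resched() \
do { \
        barrier(); \
        dec_preempt_count(); \
} while (0)
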
/*
        local_irq_restore(flags);
}
+void __raise_softirq_irqoff(unsigned int nr)
+{
+        trace_softirq_raise(nr);
+        or_softirq_pending(1UL << nr);
+}
+
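
The new __raise_softirq_irqoff() helper only traces the event and sets the pending bit; it neither disables interrupts nor wakes ksoftirqd, so callers must already run with interrupts off and know the softirq will be serviced shortly (typically on return from interrupt). A minimal, hypothetical usage sketch (queue_from_hardirq() is not part of the patch):

/* Hypothetical caller: runs in hardirq context with interrupts disabled,
 * so the pending softirq is handled on irq_exit() without waking ksoftirqd. */
static void queue_from_hardirq(void)
{
        __raise_softirq_irqoff(TASKLET_SOFTIRQ);
}
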
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
        if (local_softirq_pending())
                __do_softirq();
        local_irq_enable();
-        preempt_enable_no_resched();
+        sched_preempt_enable_no_resched();
        cond_resched();
        preempt_disable();
        rcu_note_context_switch((long)__bind_cpu);
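
The last hunk appears to sit inside ksoftirqd's work loop, which the excerpt largely elides. A simplified sketch of the pattern those lines follow (not the kernel's exact code; offline-CPU handling and the sleep-when-idle path are omitted):

/* Simplified sketch of the surrounding ksoftirqd-style work loop. */
while (local_softirq_pending()) {
        local_irq_disable();
        if (local_softirq_pending())
                __do_softirq();
        local_irq_enable();
        sched_preempt_enable_no_resched();      /* brief preemptible window */
        cond_resched();                         /* give other tasks a chance */
        preempt_disable();                      /* re-enter the loop preempt-off */
        rcu_note_context_switch((long)__bind_cpu);
}
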