#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/smp_lock.h>
#include <linux/lockdep.h>
#include <asm/hardirq.h>
#include <asm/system.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can be overridden per architecture, the default is:
 *
 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
 * - (bit 28 is the PREEMPT_ACTIVE flag.)
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x0fff0000
 */
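/*
 * Worked example (added for illustration; not part of the original
 * header): with the default layout, a preempt_count() of 0x00010102
 * decodes as a hardirq count of 1 (bits 16-27), a softirq count of 1
 * (bits 8-15) and a preemption depth of 2 (bits 0-7): a hardirq that
 * interrupted a softirq running under two levels of preempt_disable().
 */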
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS	12

#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU NR_IRQS
#endif

/*
 * The hardirq mask has to be large enough to have space for potentially
 * all IRQ sources in the system nesting on a single CPU.
 */
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif
#endif

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif
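/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * the shifts and masks above recover the three fields from a raw count,
 * e.g.
 *
 *	static inline unsigned long raw_hardirq_count(unsigned long count)
 *	{
 *		return (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT;
 *	}
 *
 * With the default layout HARDIRQ_OFFSET is 0x10000, so a count of
 * exactly HARDIRQ_OFFSET decodes to one hardirq in flight.
 */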

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
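/*
 * Usage sketch (illustrative, not part of this header): code that may
 * run in either process or interrupt context commonly keys its
 * allocation flags off in_interrupt(), since sleeping is not allowed
 * in hardirq or softirq context:
 *
 *	void *buf = kmalloc(size, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 */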

#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
#else
# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != 0)
#endif
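/*
 * Added note: in the CONFIG_PREEMPT && !CONFIG_PREEMPT_BKL case the big
 * kernel lock raises preempt_count() by one while held, yet it is
 * dropped automatically across schedule(), so holding it alone must not
 * count as atomic; comparing against kernel_locked() discounts it.
 */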

#ifdef CONFIG_PREEMPT
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_CHECK_OFFSET 0
#endif

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() \
		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
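/*
 * Added note: under CONFIG_PREEMPT the caller's preempt_disable() has
 * already raised the count by one, hence PREEMPT_CHECK_OFFSET is 1;
 * without CONFIG_PREEMPT, preempt_disable() does not touch the count
 * and the expected value is 0.
 */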

#ifdef CONFIG_PREEMPT
# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define preemptible()	0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
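/*
 * Added note: with CONFIG_PREEMPT, IRQ_EXIT_OFFSET is one less than
 * HARDIRQ_OFFSET, so irq_exit() clears the hardirq count but leaves
 * preempt_count() elevated by one, keeping preemption disabled until
 * its final preempt_enable_no_resched().
 */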

#ifdef CONFIG_SMP
extern void synchronize_irq(unsigned int irq);
#else
# define synchronize_irq(irq)	barrier()
#endif
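/*
 * Usage sketch (illustrative, with a hypothetical device structure):
 * synchronize_irq() is typically called on teardown paths to wait for
 * any handler still running on another CPU:
 *
 *	dev->shutting_down = 1;
 *	synchronize_irq(dev->irq);
 *	free_irq(dev->irq, dev);
 *
 * On UP kernels it reduces to a compiler barrier, since no handler can
 * be running concurrently on another CPU.
 */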

struct task_struct;

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Architectures that select CONFIG_VIRT_CPU_ACCOUNTING supply a real
 * implementation; everywhere else, accounting system time on irq
 * entry/exit is a no-op.
 */
static inline void account_system_vtime(struct task_struct *tsk)
{
}
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)
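/*
 * Added note: each __irq_enter() adds HARDIRQ_OFFSET (bit 16), so two
 * nested hardirqs leave 0x20000 in the hardirq field and in_irq() stays
 * true until the matching __irq_exit()s bring it back to zero.
 */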

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_system_vtime(current);		\
		sub_preempt_count(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

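/*
 * Added note: lockdep is switched off across NMI handlers because its
 * internal bookkeeping is not NMI-safe; the entry/exit accounting
 * itself is still done via __irq_enter()/__irq_exit().
 */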
#define nmi_enter()		do { lockdep_off(); __irq_enter(); } while (0)
#define nmi_exit()		do { __irq_exit(); lockdep_on(); } while (0)

#endif /* LINUX_HARDIRQ_H */