/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <[email protected]>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 *
 * We use the lower two bits of the next pointer to keep PENDING and BUSY
 * flags.
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
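
/*
 * The encoding relies on struct irq_work being at least four-byte
 * aligned, so the two low bits of a genuine next pointer are always
 * zero. A worked example of the four states, with a made-up kernel
 * address standing in for a real list link:
 *
 *	0x0000000000000000	NULL, 0 -> free
 *	0x0000000000000003	NULL, 3 -> claimed
 *	0xffff880012345643	next, 3 -> pending
 *	0x0000000000000002	NULL, 2 -> busy
 */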

static inline bool irq_work_is_set(struct irq_work *entry, int flags)
{
	return (unsigned long)entry->next & flags;
}

static inline struct irq_work *irq_work_next(struct irq_work *entry)
{
	unsigned long next = (unsigned long)entry->next;
	next &= ~IRQ_WORK_FLAGS;
	return (struct irq_work *)next;
}

static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
{
	unsigned long next = (unsigned long)entry;
	next |= flags;
	return (struct irq_work *)next;
}
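
/*
 * Each CPU keeps its own list of pending entries, linked through ->next
 * with the low flag bits kept set: new entries are pushed LIFO by
 * __irq_work_queue() and the whole list is consumed at once by
 * irq_work_run().
 */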
static DEFINE_PER_CPU(struct irq_work *, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it: succeeds for a
 * free or busy entry, fails if the entry is already pending. Claiming
 * sets both flag bits, moving the entry to the claimed state.
 */
static bool irq_work_claim(struct irq_work *entry)
{
	struct irq_work *next, *nflags;

	do {
		next = entry->next;
		if ((unsigned long)next & IRQ_WORK_PENDING)
			return false;
		nflags = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(&entry->next, next, nflags) != next);

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
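
/*
 * Architectures that can raise a self-interrupt are expected to override
 * this weak stub so that queued work runs promptly rather than waiting
 * for the next timer tick. A minimal sketch, assuming a hypothetical
 * self-IPI helper and vector (neither name comes from this file):
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		send_self_ipi(IRQ_WORK_VECTOR);
 *	}
 */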

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
	struct irq_work *next;

	preempt_disable();

	do {
		next = __this_cpu_read(irq_work_list);
		/* Can assign non-atomic because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);

	/* The list was empty, raise self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry: returns true on success, false when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *entry)
{
	if (!irq_work_claim(entry)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(entry);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
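
/*
 * Example usage, as a sketch only (my_func and my_work are hypothetical
 * names, not part of this file): embed a struct irq_work, set its
 * callback, then queue it from NMI or hardirq context. The callback is
 * invoked later by irq_work_run(), in hardirq context.
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *
 *	static struct irq_work my_work = { .func = my_func };
 *
 * Then, from NMI or hardirq context:
 *
 *	irq_work_queue(&my_work);
 */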

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *list;

	if (this_cpu_read(irq_work_list) == NULL)
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	list = this_cpu_xchg(irq_work_list, NULL);

	while (list != NULL) {
		struct irq_work *entry = list;

		list = irq_work_next(list);

		/*
		 * Clear the PENDING bit, after this point the @entry
		 * can be re-used.
		 */
		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
		entry->func(entry);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&entry->next,
			      next_flags(NULL, IRQ_WORK_BUSY),
			      NULL);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
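
/*
 * The architecture is expected to call irq_work_run() from the handler
 * of the self-interrupt raised above, or from the periodic timer tick on
 * architectures without one. A minimal sketch of such a handler (the
 * name and the pt_regs argument are illustrative, not from this file):
 *
 *	void smp_irq_work_interrupt(struct pt_regs *regs)
 *	{
 *		irq_work_run();
 *	}
 */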

/*
 * Synchronize against the irq_work @entry: busy-wait until the entry is
 * no longer in use.
 */
void irq_work_sync(struct irq_work *entry)
{
	WARN_ON_ONCE(irqs_disabled());

	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
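
/*
 * Typical use, as a sketch (obj and its fields are hypothetical): wait
 * for an outstanding callback to finish before freeing the object that
 * embeds the entry.
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 */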