// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <[email protected]>
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by running
various independent hardware delays and discovery operations decoupled
from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call the
async_synchronize_cookie() function and pass in its own cookie.  The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.  (An illustrative sketch of this pattern follows
this comment block.)

Subsystem/driver initialization code that has scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function.  This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
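
/*
 * Illustrative sketch (not part of the original file) of the cookie
 * pattern described above; the my_dev_* names are hypothetical, and
 * async_schedule() is the wrapper declared in <linux/async.h>:
 *
 *	static void my_dev_probe_async(void *data, async_cookie_t cookie)
 *	{
 *		struct my_dev *dev = data;
 *
 *		my_dev_slow_hw_init(dev);
 *
 *		async_synchronize_cookie(cookie);
 *		my_dev_register_device_number(dev);
 *	}
 *
 *	async_schedule(my_dev_probe_async, dev);
 *
 * my_dev_slow_hw_init() may run out of order relative to other probes,
 * but async_synchronize_cookie(cookie) waits for everything scheduled
 * before this entry, so the globally visible device-number registration
 * still happens in submission order.
 */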

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

/*
 * Return the cookie of the oldest pending entry in @domain, or across
 * all registered domains when @domain is NULL.  Returns
 * ASYNC_COOKIE_MAX when nothing is pending.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * Run an entry's function, then remove it from the pending queues.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime, delta, rettime;

	/* 1) run (and print duration) */
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("calling %lli_%pS @ %i\n",
			 (long long)entry->cookie,
			 entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_debug("initcall %lli_%pS returned 0 after %lld usecs\n",
			 (long long)entry->cookie,
			 entry->func,
			 (long long)ktime_to_ns(delta) >> 10);
	}

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}
	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* mark that this task has queued an async job, used by module init */
	current->flags |= PF_USED_ASYNC;

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);

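/*
 * Illustrative sketch (not part of the original file): scheduling work
 * close to a device's NUMA node into a private domain, then waiting on
 * just that domain.  sata_probe() and port are hypothetical;
 * async_schedule_domain() in <linux/async.h> wraps this function with
 * NUMA_NO_NODE.
 *
 *	static ASYNC_DOMAIN(sata_domain);
 *
 *	async_schedule_node_domain(sata_probe, port,
 *				   dev_to_node(&port->dev), &sata_domain);
 *	...
 *	async_synchronize_full_domain(&sata_domain);
 *
 * Waiting on &sata_domain does not block on unrelated async work, while
 * async_synchronize_full() still covers it because ASYNC_DOMAIN() marks
 * the domain as registered.
 */
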
/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

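/*
 * Illustrative sketch (not part of the original file) of the rule from
 * the comment at the top of this file: an init function that scheduled
 * async probes flushes them before returning.  my_probe_one() and
 * my_devs[] are hypothetical.
 *
 *	static int __init my_driver_init(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < ARRAY_SIZE(my_devs); i++)
 *			async_schedule(my_probe_one, &my_devs[i]);
 *
 *		async_synchronize_full();
 *		return 0;
 *	}
 */
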
/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain.
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing.
 */
void async_unregister_domain(struct async_domain *domain)
{
	spin_lock_irq(&async_lock);
	WARN_ON(!domain->registered || !list_empty(&domain->pending));
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

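/*
 * Illustrative sketch (not part of the original file): a registered
 * domain must be idle before it goes away, so flush it and only then
 * unregister it.  The my_domain name is hypothetical.
 *
 *	static ASYNC_DOMAIN(my_domain);
 *
 *	async_synchronize_full_domain(&my_domain);
 *	async_unregister_domain(&my_domain);
 *
 * Afterwards async_synchronize_full() no longer waits on &my_domain.  A
 * domain declared with ASYNC_DOMAIN_EXCLUSIVE() starts out unregistered
 * and never needs this, which is why the comment above prefers it.
 */
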
/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime, delta, endtime;

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		pr_debug("async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	if (initcall_debug && system_state < SYSTEM_RUNNING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		pr_debug("async_continuing @ %i after %lli usec\n",
			 task_pid_nr(current),
			 (long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
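
/*
 * Illustrative sketch (not part of the original file): callers commonly
 * use current_is_async() to avoid a self-deadlock, since waiting for all
 * async work from inside an async worker would wait on the worker's own
 * entry forever.  The my_load_firmware* names are hypothetical.
 *
 *	if (current_is_async())
 *		return my_load_firmware_nowait(dev);
 *
 *	async_synchronize_full();
 *	return my_load_firmware(dev);
 */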