// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for an api documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on. If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int *cb_cpu)
{
	int i, cpu, cpu_index, target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
	target_cpu = padata_cpu_hash(pd, padata->seq_nr);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work(pinst->parallel_wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
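
/*
 * Example (illustrative sketch only, not part of padata): a minimal user of
 * padata_do_parallel(). The caller embeds struct padata_priv in its own
 * request, provides the two callbacks and submits the object. All my_*
 * names below are hypothetical and exist only for this sketch; a GFP_KERNEL
 * allocation assumes the submitter runs in process context.
 */
struct my_request {
	struct padata_priv padata;
	unsigned int payload;
};

static void my_parallel_cb(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request, padata);

	/* The CPU-hungry part runs here, with BHs off, on one of the pcpu CPUs. */
	req->payload *= 2;

	/* Every parallelized object must be fed back to padata_do_serial(). */
	padata_do_serial(padata);
}

static void my_serial_cb(struct padata_priv *padata)
{
	struct my_request *req = container_of(padata, struct my_request, padata);

	/* Runs on the callback CPU chosen at submit time, with BHs off. */
	kfree(req);
}

static int my_submit(struct padata_instance *pinst, unsigned int payload,
		     int cb_cpu)
{
	struct my_request *req;
	int err;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->payload = payload;
	req->padata.parallel = my_parallel_cb;
	req->padata.serial = my_serial_cb;

	/*
	 * cb_cpu may be rewritten to a fallback CPU if it is not in the
	 * serial cpumask; -EINVAL and -EBUSY must be handled by the caller.
	 */
	err = padata_do_parallel(pinst, &req->padata, &cb_cpu);
	if (err)
		kfree(req);
	return err;
}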

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if the next object that needs serialization will
 * be parallel processed by another cpu and is not yet present in
 * the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	next_queue = per_cpu_ptr(pd->pqueue, cpu);
	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;
	struct padata_parallel_queue *next_queue;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb__after_atomic in padata_do_serial.
	 */
	smp_mb();

	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
	if (!list_empty(&next_queue->reorder.list) &&
	    padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
							   padata->cpu);
	struct padata_priv *cur;

	spin_lock(&pqueue->reorder.lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	atomic_inc(&pd->reorder_objects);
	spin_unlock(&pqueue->reorder.lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb__after_atomic();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
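
/*
 * Example (illustrative sketch only): objects are moved to the serial stage
 * strictly in submission (sequence number) order, so a serial callback that
 * appends results to a list sees them in the order padata_do_parallel() was
 * called -- at least for objects sharing the same callback CPU -- even when
 * the parallel work finished out of order. The my_* names are hypothetical
 * and exist only for this sketch.
 */
static LIST_HEAD(my_done_list);
static DEFINE_SPINLOCK(my_done_lock);

struct my_item {
	struct padata_priv padata;
	struct list_head node;
};

static void my_serial_in_order(struct padata_priv *padata)
{
	struct my_item *item = container_of(padata, struct my_item, padata);

	/* Runs with BHs off on the callback CPU requested at submit time. */
	spin_lock(&my_done_lock);
	list_add_tail(&item->node, &my_done_list);
	spin_unlock(&my_done_lock);
}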

static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	struct workqueue_attrs *attrs;
	int err = -ENOMEM;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto out;
	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);

	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto free_pcpu_mask;
	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		goto free_cbcpu_mask;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pd->cpumask.pcpu);
	err = apply_workqueue_attrs(pd->pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);
	if (err < 0)
		goto free_cbcpu_mask;

	return 0;

free_cbcpu_mask:
	free_cpumask_var(pd->cpumask.cbcpu);
free_pcpu_mask:
	free_cpumask_var(pd->cpumask.pcpu);
out:
	return err;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;

	pd->pinst = pinst;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *                                      registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to data instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
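
/*
 * Example (illustrative sketch only): reacting to cpumask changes. The
 * notifier data pointer is the instance's new struct padata_cpumask and
 * val is a bitmask of PADATA_CPU_PARALLEL/PADATA_CPU_SERIAL indicating
 * which of the two masks changed. The my_* names are hypothetical.
 */
static int my_cpumask_change(struct notifier_block *nb, unsigned long val,
			     void *data)
{
	struct padata_cpumask *new_masks = data;

	if (val & PADATA_CPU_PARALLEL)
		pr_debug("parallel cpumask is now %*pb\n",
			 cpumask_pr_args(new_masks->pcpu));
	if (val & PADATA_CPU_SERIAL)
		pr_debug("serial cpumask is now %*pb\n",
			 cpumask_pr_args(new_masks->cbcpu));

	return NOTIFY_OK;
}

static struct notifier_block my_cpumask_nb = {
	.notifier_call = my_cpumask_change,
};

/*
 * A user would register the block after allocating its instance,
 *	padata_register_cpumask_notifier(pinst, &my_cpumask_nb);
 * and unregister it before freeing the instance,
 *	padata_unregister_cpumask_notifier(pinst, &my_cpumask_nb);
 */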

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumask: Sets the cpumask specified by @cpumask_type to the
 *                     value equivalent to @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
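
/*
 * Example (illustrative sketch only): restricting the parallel workers of an
 * instance to CPUs 0-3 at runtime. The CPU numbers are arbitrary and the
 * my_* name is hypothetical; padata_set_cpumask() fails with -EINVAL if the
 * new mask contains no online CPU.
 */
static int my_restrict_parallel_cpus(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int cpu, err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	for (cpu = 0; cpu < 4; cpu++)
		cpumask_set_cpu(cpu, mask);

	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return err;
}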

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @name: used to identify the instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
static struct padata_instance *padata_alloc(const char *name,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	get_online_cpus();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
#endif

	put_online_cpus();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	put_online_cpus();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @name: used to identify the instance
 */
struct padata_instance *padata_alloc_possible(const char *name)
{
	return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
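
/*
 * Example (illustrative sketch only): a typical instance lifecycle.  A user
 * allocates an instance, starts it, submits work with padata_do_parallel()
 * and finally stops and frees it.  The my_* names are hypothetical and the
 * error handling is intentionally minimal.
 */
static struct padata_instance *my_pinst;

static int my_padata_init(void)
{
	int err;

	my_pinst = padata_alloc_possible("my_instance");
	if (!my_pinst)
		return -ENOMEM;

	err = padata_start(my_pinst);
	if (err) {
		padata_free(my_pinst);
		return err;
	}

	return 0;
}

static void my_padata_exit(void)
{
	padata_stop(my_pinst);
	padata_free(my_pinst);
}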

/**
 * padata_free - free a padata instance
 *
 * @padata_inst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online,
				      padata_cpu_prep_down);
	if (ret < 0)
		return ret;
	hp_online = ret;
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif