/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for the API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */

	spin_lock(&pd->seq_lock);
	cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
	pd->seq_nr++;
	spin_unlock(&pd->seq_lock);

	return padata_index_to_cpu(pd, cpu_index);
}
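
/*
 * Illustration of the round robin that falls out of the hash above
 * (example only, not part of the code): with a parallel cpumask of
 * {0, 2, 5}, cpumask_weight() is 3, so sequence numbers 0..5 yield
 * cpu_index 0, 1, 2, 0, 1, 2, which padata_index_to_cpu() maps to the
 * cpus 0, 2, 5, 0, 2, 5.
 */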

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);
	pd = pqueue->pd;
	pinst = pd->pinst;

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
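
/*
 * Usage sketch (illustrative only, not taken from a real user): a caller
 * embeds struct padata_priv into its own request and sets the parallel
 * and serial callbacks before submission.  The names my_request,
 * my_parallel, my_serial and my_submit are hypothetical.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		void *payload;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		// heavy work, runs with BHs off on the selected cpu
 *		padata_do_serial(padata);
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		// runs on cb_cpu, in the original submission order
 *	}
 *
 *	static int my_submit(struct padata_instance *pinst,
 *			     struct my_request *req, int cb_cpu)
 *	{
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;
 *		return padata_do_parallel(pinst, &req->padata, cb_cpu);
 *	}
 *
 * A return value of -EBUSY or -EINVAL from padata_do_parallel means the
 * object was not queued and the caller has to deal with it itself.
 */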

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *queue, *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
	if (queue->cpu_index == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the hold time of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}

static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	pd->seq_nr = 0;
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either pcpu or cbcpu or both cpumasks
 *                                    change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *                                      registered earlier using
 *                                      padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
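
/*
 * Usage sketch (illustrative only): watching for cpumask changes.  The
 * callback and notifier block names are hypothetical; the notifier is
 * invoked with PADATA_CPU_PARALLEL and/or PADATA_CPU_SERIAL in the val
 * argument and a pointer to the new struct padata_cpumask as data.
 *
 *	static int my_cpumask_change(struct notifier_block *self,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			;	// react to new_masks->pcpu
 *		if (val & PADATA_CPU_SERIAL)
 *			;	// react to new_masks->cbcpu
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nblock = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nblock);
 */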

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumasks - Set both parallel and serial cpumasks. The first
 *                       one is used by the parallel workers and the second
 *                       one by the workers doing serialization.
 *
 * @pinst: padata instance
 * @pcpumask: the cpumask to use for parallel workers
 * @cbcpumask: the cpumask to use for serial workers
 */
int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask,
			cpumask_var_t cbcpumask)
{
	int err;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask);

	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumasks);

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or the parallel cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
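
/*
 * Usage sketch (illustrative only): restricting the parallel workers to a
 * caller-chosen set of cpus.  The variable names are hypothetical.
 *
 *	cpumask_var_t new_mask;
 *	int err;
 *
 *	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(new_mask);
 *	cpumask_set_cpu(0, new_mask);
 *	cpumask_set_cpu(1, new_mask);
 *
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, new_mask);
 *	free_cpumask_var(new_mask);
 */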

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

/**
 * padata_add_cpu - add a cpu to one or both (parallel and serial)
 *                  padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_set_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_add_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
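
/*
 * Usage sketch (illustrative only): taking a cpu out of the parallel
 * cpumask and adding it back later.
 *
 *	padata_remove_cpu(pinst, 3, PADATA_CPU_PARALLEL);
 *	...
 *	padata_add_cpu(pinst, 3, PADATA_CPU_PARALLEL);
 */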

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		__padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		break;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		__padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
	}

	return NOTIFY_OK;
}
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask),
			       nr_cpu_ids);
	if (PAGE_SIZE - len < 2)
		len = -EINVAL;
	else
		len += sprintf(buf + len, "\n");

	mutex_unlock(&pinst->lock);
	return len;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
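
/*
 * Usage sketch (illustrative only): padata itself only initializes
 * pinst->kobj; a padata user has to add it somewhere under sysfs.  Once
 * that is done, the cpumasks can be read and written from userspace,
 * for example:
 *
 *	cat /sys/.../<instance>/parallel_cpumask
 *	echo 3 > /sys/.../<instance>/serial_cpumask
 *
 * The exact path depends on where the user registers the kobject.
 */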

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

#ifdef CONFIG_HOTPLUG_CPU
	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
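
/*
 * Usage sketch (illustrative only): a typical instance lifecycle.  The
 * workqueue name and flags are hypothetical; real users pick their own.
 *
 *	struct workqueue_struct *wq;
 *	struct padata_instance *pinst;
 *
 *	wq = alloc_workqueue("my_padata_wq", WQ_CPU_INTENSIVE, 1);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	pinst = padata_alloc_possible(wq);
 *	if (!pinst) {
 *		destroy_workqueue(wq);
 *		return -ENOMEM;
 *	}
 *
 *	padata_start(pinst);
 *	// submit objects with padata_do_parallel() ...
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */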

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);