/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
        struct task_struct *active_writer;
        /*
         * Synchronizes accesses to refcount. Also blocks the new
         * readers during an ongoing cpu hotplug operation.
         */
        struct mutex lock;
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
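
/*
 * Typical reader-side usage (an illustrative sketch, not code from
 * this file):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		...		(no cpu can go offline here)
 *	put_online_cpus();
 */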

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting to
 * serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
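
/*
 * Writer-side sketch (illustrative, not code from this file): any path
 * that updates the cpu maps or the notifier chain brackets the update
 * with this pair, exactly as register_cpu_notifier() below does:
 *
 *	cpu_maps_update_begin();
 *	... update cpu_present_mask / cpu_online_mask or the chain ...
 *	cpu_maps_update_done();
 */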

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}
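
/*
 * The two lock pairs nest, as _cpu_down()/_cpu_up() below illustrate
 * (sketch):
 *
 *	cpu_maps_update_begin();	from cpu_down()/cpu_up()
 *	cpu_hotplug_begin();		waits until refcount == 0
 *	... the actual hotplug operation ...
 *	cpu_hotplug_done();
 *	cpu_maps_update_done();
 */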

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
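
/*
 * A minimal notifier sketch (the callback and its names are
 * hypothetical, not code from this file):
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_ONLINE:
 *		case CPU_ONLINE_FROZEN:
 *			... set up per-cpu state for cpu ...
 *			break;
 *		case CPU_DEAD:
 *		case CPU_DEAD_FROZEN:
 *			... tear it down again ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 * registered once with register_cpu_notifier(&my_cpu_notifier). The
 * _FROZEN variants arrive when the operation came from the suspend
 * path (see the CPU_TASKS_FROZEN modifier below).
 */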
147 | ||
1da177e4 LT |
148 | static inline void check_for_tasks(int cpu) |
149 | { | |
150 | struct task_struct *p; | |
151 | ||
152 | write_lock_irq(&tasklist_lock); | |
153 | for_each_process(p) { | |
154 | if (task_cpu(p) == cpu && | |
155 | (!cputime_eq(p->utime, cputime_zero) || | |
156 | !cputime_eq(p->stime, cputime_zero))) | |
157 | printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\ | |
e7407dcc | 158 | (state = %ld, flags = %x) \n", |
ba25f9dc PE |
159 | p->comm, task_pid_nr(p), cpu, |
160 | p->state, p->flags); | |
1da177e4 LT |
161 | } |
162 | write_unlock_irq(&tasklist_lock); | |
163 | } | |
164 | ||
db912f96 AK |
165 | struct take_cpu_down_param { |
166 | unsigned long mod; | |
167 | void *hcpu; | |
168 | }; | |

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
                                param->hcpu);

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}

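/*
 * take_cpu_down() runs as the __stop_machine() callback from
 * _cpu_down() below: it executes on the dying CPU while every other
 * online CPU spins with interrupts disabled, so nothing else can race
 * with __cpu_disable() or the CPU_DYING notifiers.
 */
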
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        cpumask_var_t old_allowed;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
                return -ENOMEM;

        cpu_hotplug_begin();
        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
        if (err == NOTIFY_BAD) {
                nr_calls--;
                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                          hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                err = -EINVAL;
                goto out_release;
        }

        /* Ensure that we are not runnable on dying cpu */
        cpumask_copy(old_allowed, &current->cpus_allowed);
        set_cpus_allowed_ptr(current,
                             cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();

                goto out_allowed;
        }
        BUG_ON(cpu_online(cpu));

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
                                    hcpu) == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_allowed:
        set_cpus_allowed_ptr(current, old_allowed);
out_release:
        cpu_hotplug_done();
        if (!err) {
                if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();
        }
        free_cpumask_var(old_allowed);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        err = stop_machine_create();
        if (err)
                return err;
        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        set_cpu_active(cpu, false);

        /*
         * Make sure all cpus went through the reschedule and are no
         * longer using a stale version of cpu_active_mask. This is not
         * strictly necessary because the stop_machine() we run down the
         * line already provides the required synchronization. But it's
         * really a side effect and we do not want to depend on the
         * innards of stop_machine here.
         */
        synchronize_sched();

        err = _cpu_down(cpu, 0);

        if (cpu_online(cpu))
                set_cpu_active(cpu, true);

out:
        cpu_maps_update_done();
        stop_machine_destroy();
        return err;
}
EXPORT_SYMBOL(cpu_down);
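
/*
 * Sketch of the usual caller (roughly what the sysfs "online"
 * attribute in drivers/base/cpu.c does; not code from this file):
 *
 *	echo 0 > /sys/devices/system/cpu/cpuN/online   ->  cpu_down(N)
 *	echo 1 > /sys/devices/system/cpu/cpuN/online   ->  cpu_up(N)
 */
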
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
                                        -1, &nr_calls);
        if (ret == NOTIFY_BAD) {
                nr_calls--;
                printk("%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        set_cpu_active(cpu, true);

        /* Now call the notifier chain with CPU_ONLINE. */
        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __raw_notifier_call_chain(&cpu_chain,
                                CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}

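/*
 * Boot-time bring-up sketch (roughly what smp_init() in init/main.c
 * does; not code from this file):
 *
 *	for_each_present_cpu(cpu) {
 *		if (num_online_cpus() >= setup_max_cpus)
 *			break;
 *		if (!cpu_online(cpu))
 *			cpu_up(cpu);
 *	}
 */
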
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error;

        error = stop_machine_create();
        if (error)
                return error;
        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);
        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error) {
                        cpumask_set_cpu(cpu, frozen_cpus);
                        printk("CPU%d is down\n", cpu);
                } else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }
        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        stop_machine_destroy();
        return error;
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk("Enabling non-boot CPUs ...\n");
        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }
        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
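
/*
 * Suspend-path sketch (roughly how the PM core brackets entry to a
 * sleep state; not code from this file):
 *
 *	error = disable_nonboot_cpus();
 *	if (!error)
 *		... enter the sleep state on the remaining boot CPU ...
 *	enable_nonboot_cpus();
 */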

static int alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}
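
/*
 * Arch-side sketch of where the call belongs (hypothetical; the exact
 * secondary-CPU startup path varies per architecture):
 *
 *	void __cpuinit start_secondary(void)
 *	{
 *		... low-level init of this cpu ...
 *		notify_cpu_starting(smp_processor_id());
 *		set_cpu_online(smp_processor_id(), true);
 *		local_irq_enable();
 *		cpu_idle();
 *	}
 */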

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
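
/*
 * How the lookup works: get_cpu_mask() in include/linux/cpumask.h does,
 * in essence (a sketch of the header's side, not code from this file):
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row 1 + (cpu % BITS_PER_LONG) has 1UL << (cpu % BITS_PER_LONG) in its
 * word 0; stepping the pointer back by cpu / BITS_PER_LONG words (into
 * the all-zero row 0, which is why row 0 must stay empty) makes that
 * word land at index cpu / BITS_PER_LONG of the resulting mask, i.e. a
 * constant cpumask with only bit 'cpu' set.
 */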

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}
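
/*
 * Arch setup sketch of how these helpers are typically used during
 * early SMP bring-up (hypothetical; details vary per architecture):
 *
 *	void __init smp_prepare_cpus(unsigned int max_cpus)
 *	{
 *		unsigned int cpu;
 *
 *		for (cpu = 0; cpu < nr_detected_cpus; cpu++) {
 *			set_cpu_possible(cpu, true);
 *			set_cpu_present(cpu, true);
 *		}
 *	}
 *
 * with set_cpu_online() flipped from the arch startup/teardown paths
 * and set_cpu_active() driven from cpu_up()/cpu_down() above.
 */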