/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/* This protects CPUs going up and down... */
static DEFINE_MUTEX(cpu_add_remove_lock);
static DEFINE_MUTEX(cpu_bitmask_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

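/*
 * Editorial note on lock ordering, as used below: cpu_add_remove_lock is
 * the outer lock (it serializes whole hotplug operations and notifier
 * (un)registration), while cpu_bitmask_lock is taken inside it around the
 * actual cpu_online_map manipulation. lock_cpu_hotplug() takes only
 * cpu_bitmask_lock, so readers of the map can run concurrently with the
 * notifier preparation/cleanup phases but not with the map update itself.
 */
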
#ifdef CONFIG_HOTPLUG_CPU

/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
static struct task_struct *recursive;
static int recursive_depth;

void lock_cpu_hotplug(void)
{
	struct task_struct *tsk = current;

	if (tsk == recursive) {
		static int warnings = 10;
		if (warnings) {
			printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
			WARN_ON(1);
			warnings--;
		}
		recursive_depth++;
		return;
	}
	mutex_lock(&cpu_bitmask_lock);
	recursive = tsk;
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

void unlock_cpu_hotplug(void)
{
	WARN_ON(recursive != current);
	if (recursive_depth) {
		recursive_depth--;
		return;
	}
	recursive = NULL;
	mutex_unlock(&cpu_bitmask_lock);
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);

#endif	/* CONFIG_HOTPLUG_CPU */
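
/*
 * Usage note (editorial): lock_cpu_hotplug()/unlock_cpu_hotplug() let a
 * caller read cpu_online_map without racing against a CPU coming or going.
 * A minimal sketch of the intended pattern follows; the per-CPU work is an
 * illustrative assumption, not code from this file:
 *
 *	lock_cpu_hotplug();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);	// hypothetical per-CPU work
 *	unlock_cpu_hotplug();
 *
 * The "recursive" bookkeeping above exists only to tolerate (and warn
 * about) callers that take the lock twice on the same task.
 */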

/* Need to know about CPUs going up/down? */
int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	mutex_lock(&cpu_add_remove_lock);
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	mutex_lock(&cpu_add_remove_lock);
	raw_notifier_chain_unregister(&cpu_chain, nb);
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(unregister_cpu_notifier);

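/*
 * Example (editorial sketch, not part of this file): a subsystem that needs
 * to track CPUs registers a notifier_block whose callback receives the CPU
 * number through hcpu. The names my_cpu_callback/my_cpu_nb are hypothetical:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:	// allocate per-CPU state; may veto
 *			break;
 *		case CPU_ONLINE:	// CPU is running; start using it
 *			break;
 *		case CPU_DEAD:		// CPU is gone; free per-CPU state
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_nb = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_nb);
 *
 * Returning NOTIFY_BAD from CPU_UP_PREPARE or CPU_DOWN_PREPARE vetoes the
 * transition, as handled in _cpu_up()/_cpu_down() below.
 */
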
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, p->pid, cpu, p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

/* Take this CPU down. */
static int take_cpu_down(void *unused)
{
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu,
					  nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
		       __FUNCTION__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on dying cpu */
	old_allowed = current->cpus_allowed;
	tmp = CPU_MASK_ALL;
	cpu_clear(cpu, tmp);
	set_cpus_allowed(current, tmp);

	mutex_lock(&cpu_bitmask_lock);
	p = __stop_machine_run(take_cpu_down, NULL, cpu);
	mutex_unlock(&cpu_bitmask_lock);

	if (IS_ERR(p) || cpu_online(cpu)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
	}

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* Move it here so it can run. */
	kthread_bind(p, get_cpu());
	put_cpu();

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed(current, old_allowed);
out_release:
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);
	return err;
}

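/*
 * Editorial summary of the notifier traffic in _cpu_down() as implemented
 * above. On the successful path the chain sees:
 *
 *	CPU_LOCK_ACQUIRE -> CPU_DOWN_PREPARE -> CPU_DEAD -> CPU_LOCK_RELEASE
 *
 * If CPU_DOWN_PREPARE is vetoed, only the nr_calls callbacks that already
 * ran are rolled back with CPU_DOWN_FAILED; if the stop_machine phase fails
 * instead, CPU_DOWN_FAILED goes to the whole chain in place of CPU_DEAD.
 */
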
int cpu_down(unsigned int cpu)
{
	int err = 0;

	mutex_lock(&cpu_add_remove_lock);
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_down(cpu);

	mutex_unlock(&cpu_add_remove_lock);
	return err;
}
#endif /* CONFIG_HOTPLUG_CPU */

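/*
 * Usage note (editorial): cpu_down()/cpu_up() are the entry points behind
 * the sysfs "online" attribute, so from userspace a CPU is typically
 * toggled with something like:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	# ends up in cpu_down(1)
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	# ends up in cpu_up(1)
 *
 * Both paths serialize on cpu_add_remove_lock and honor
 * cpu_hotplug_disabled, which the suspend code below sets.
 */
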
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu,
					-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		printk("%s: attempt to bring up CPU %u failed\n",
		       __FUNCTION__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	mutex_lock(&cpu_bitmask_lock);
	ret = __cpu_up(cpu);
	mutex_unlock(&cpu_bitmask_lock);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now tell everyone the CPU is up. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED, hcpu, nr_calls, NULL);
	raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu);

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	mutex_lock(&cpu_add_remove_lock);
	if (cpu_hotplug_disabled)
		err = -EBUSY;
	else
		err = _cpu_up(cpu);

	mutex_unlock(&cpu_add_remove_lock);
	return err;
}

#ifdef CONFIG_SUSPEND_SMP
/* Needed to prevent the microcode driver from requesting firmware in its CPU
 * hotplug notifier during the suspend/resume.
 */
int suspend_cpu_hotplug;
EXPORT_SYMBOL(suspend_cpu_hotplug);

static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	mutex_lock(&cpu_add_remove_lock);
	suspend_cpu_hotplug = 1;
	first_cpu = first_cpu(cpu_online_map);
	/* We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpus_clear(frozen_cpus);
	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu);
		if (!error) {
			cpu_set(cpu, frozen_cpus);
			printk("CPU%d is down\n", cpu);
		} else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
			       cpu, error);
			break;
		}
	}
	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	suspend_cpu_hotplug = 0;
	mutex_unlock(&cpu_add_remove_lock);
	return error;
}
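
/*
 * Usage sketch (editorial): the suspend core pairs these two calls around
 * the actual sleep. Roughly, under assumptions about the caller's error
 * handling (the function below is illustrative, not part of this file):
 *
 *	static int hypothetical_suspend(void)
 *	{
 *		int error = disable_nonboot_cpus();
 *
 *		if (!error) {
 *			// ... enter the sleep state on the boot CPU ...
 *		}
 *		enable_nonboot_cpus();	// brings frozen_cpus back up
 *		return error;
 *	}
 *
 * Note that enable_nonboot_cpus() is safe to call even when
 * disable_nonboot_cpus() failed partway: it only onlines CPUs recorded in
 * frozen_cpus and always clears cpu_hotplug_disabled.
 */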

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	mutex_lock(&cpu_add_remove_lock);
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	suspend_cpu_hotplug = 1;
	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask(cpu, frozen_cpus) {
		error = _cpu_up(cpu);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
	suspend_cpu_hotplug = 0;
out:
	mutex_unlock(&cpu_add_remove_lock);
}
#endif /* CONFIG_SUSPEND_SMP */