/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <asm/semaphore.h>

/* This protects CPUs going up and down... */
static DECLARE_MUTEX(cpucontrol);

static struct notifier_block *cpu_chain;

#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *lock_cpu_hotplug_owner;
static int lock_cpu_hotplug_depth;

static int __lock_cpu_hotplug(int interruptible)
{
        int ret = 0;

        if (lock_cpu_hotplug_owner != current) {
                if (interruptible)
                        ret = down_interruptible(&cpucontrol);
                else
                        down(&cpucontrol);
        }

        /*
         * Set only if we succeed in locking
         */
        if (!ret) {
                lock_cpu_hotplug_depth++;
                lock_cpu_hotplug_owner = current;
        }

        return ret;
}
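
/*
 * Note that the owner/depth pair above makes the cpucontrol semaphore
 * recursive for the task that already holds it: a notifier callback
 * running under lock_cpu_hotplug() may take the lock again without
 * deadlocking, and unlock_cpu_hotplug() below only releases the
 * semaphore once the outermost hold is dropped.
 */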

void lock_cpu_hotplug(void)
{
        __lock_cpu_hotplug(0);
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

void unlock_cpu_hotplug(void)
{
        if (--lock_cpu_hotplug_depth == 0) {
                lock_cpu_hotplug_owner = NULL;
                up(&cpucontrol);
        }
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);

int lock_cpu_hotplug_interruptible(void)
{
        return __lock_cpu_hotplug(1);
}
EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
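
/*
 * Example (a sketch; my_count_online_cpus() is a hypothetical caller,
 * not part of this file): code that needs the set of online CPUs to
 * stay stable while it works brackets the region with the hotplug
 * lock:
 *
 *	int my_count_online_cpus(void)
 *	{
 *		int cpu, n = 0, ret;
 *
 *		ret = lock_cpu_hotplug_interruptible();
 *		if (ret)
 *			return ret;	(-EINTR if interrupted by a signal)
 *		for_each_online_cpu(cpu)
 *			n++;
 *		unlock_cpu_hotplug();
 *		return n;
 *	}
 */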
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        if ((ret = lock_cpu_hotplug_interruptible()) != 0)
                return ret;
        ret = notifier_chain_register(&cpu_chain, nb);
        unlock_cpu_hotplug();
        return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        lock_cpu_hotplug();
        notifier_chain_unregister(&cpu_chain, nb);
        unlock_cpu_hotplug();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
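
/*
 * Example registration (a sketch; my_cpu_callback and my_cpu_nb are
 * hypothetical names): an interested subsystem supplies a
 * notifier_block whose callback switches on the event and recovers
 * the CPU number from the hcpu cookie:
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_ONLINE:
 *			(set up per-cpu state for cpu)
 *			break;
 *		case CPU_DEAD:
 *			(tear it down again)
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_nb = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_nb);
 */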

#ifdef CONFIG_HOTPLUG_CPU
static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                               "(state = %ld, flags = %lx)\n",
                               p->comm, p->pid, cpu, p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}
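
/*
 * check_for_tasks() flags what should be an impossible situation: a
 * task still assigned to a CPU that has just gone down.  The
 * utime/stime test presumably exists to skip tasks that have never
 * actually run, avoiding false alarms.
 */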

/* Take this CPU down. */
static int take_cpu_down(void *unused)
{
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}
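
/*
 * take_cpu_down() is handed to __stop_machine_run() below, so it
 * executes on the dying CPU itself while every other CPU spins with
 * interrupts disabled; that is what makes calling __cpu_disable()
 * here safe.
 */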

int cpu_down(unsigned int cpu)
{
        int err;
        struct task_struct *p;
        cpumask_t old_allowed, tmp;

        if ((err = lock_cpu_hotplug_interruptible()) != 0)
                return err;

        if (num_online_cpus() == 1) {
                err = -EBUSY;
                goto out;
        }

        if (!cpu_online(cpu)) {
                err = -EINVAL;
                goto out;
        }

        err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                                  (void *)(long)cpu);
        if (err == NOTIFY_BAD) {
                printk("%s: attempt to take down CPU %u failed\n",
                       __FUNCTION__, cpu);
                err = -EINVAL;
                goto out;
        }

        /* Ensure that we are not runnable on dying cpu */
        old_allowed = current->cpus_allowed;
        tmp = CPU_MASK_ALL;
        cpu_clear(cpu, tmp);
        set_cpus_allowed(current, tmp);

        p = __stop_machine_run(take_cpu_down, NULL, cpu);
        if (IS_ERR(p)) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
                                        (void *)(long)cpu) == NOTIFY_BAD)
                        BUG();

                err = PTR_ERR(p);
                goto out_allowed;
        }

        if (cpu_online(cpu))
                goto out_thread;

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* Move it here so it can run. */
        kthread_bind(p, get_cpu());
        put_cpu();

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
            == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_thread:
        err = kthread_stop(p);
out_allowed:
        set_cpus_allowed(current, old_allowed);
out:
        unlock_cpu_hotplug();
        return err;
}
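
/*
 * Notifier traffic generated by one cpu_down() call:
 *
 *	CPU_DOWN_PREPARE	always sent first; a NOTIFY_BAD result
 *				vetoes the whole operation
 *	CPU_DOWN_FAILED		sent only if the CPU refused to die
 *	CPU_DEAD		sent only once __cpu_die() has finished
 */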
#endif	/* CONFIG_HOTPLUG_CPU */

int __devinit cpu_up(unsigned int cpu)
{
        int ret;
        void *hcpu = (void *)(long)cpu;

        if ((ret = lock_cpu_hotplug_interruptible()) != 0)
                return ret;

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
        if (ret == NOTIFY_BAD) {
                printk("%s: attempt to bring up CPU %u failed\n",
                       __FUNCTION__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        if (!cpu_online(cpu))
                BUG();

        /* Tell interested subsystems the CPU is now online. */
        notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
        if (ret != 0)
                notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
        unlock_cpu_hotplug();
        return ret;
}
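
/*
 * Notifier traffic generated by one cpu_up() call:
 *
 *	CPU_UP_PREPARE		always sent first; a NOTIFY_BAD result
 *				vetoes the whole operation
 *	CPU_ONLINE		sent only once __cpu_up() has succeeded
 *	CPU_UP_CANCELED		sent only if bring-up failed after
 *				CPU_UP_PREPARE had already been sent
 */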