Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	138
1 file changed, 118 insertions(+), 20 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f230f9ae01c2..32c96628463e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,11 @@ static DEFINE_MUTEX(cpu_bitmask_lock);
 
 static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
 
+/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+ * Should always be manipulated under cpu_add_remove_lock
+ */
+static int cpu_hotplug_disabled;
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 /* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
@@ -108,30 +113,25 @@ static int take_cpu_down(void *unused)
 	return 0;
 }
 
-int cpu_down(unsigned int cpu)
+/* Requires cpu_add_remove_lock to be held */
+static int _cpu_down(unsigned int cpu)
 {
 	int err;
 	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
 
-	mutex_lock(&cpu_add_remove_lock);
-	if (num_online_cpus() == 1) {
-		err = -EBUSY;
-		goto out;
-	}
+	if (num_online_cpus() == 1)
+		return -EBUSY;
 
-	if (!cpu_online(cpu)) {
-		err = -EINVAL;
-		goto out;
-	}
+	if (!cpu_online(cpu))
+		return -EINVAL;
 
 	err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
 		printk("%s: attempt to take down CPU %u failed\n",
 				__FUNCTION__, cpu);
-		err = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
 	/* Ensure that we are not runnable on dying cpu */
@@ -179,22 +179,32 @@ out_thread:
 	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
-out:
+	return err;
+}
+
+int cpu_down(unsigned int cpu)
+{
+	int err = 0;
+
+	mutex_lock(&cpu_add_remove_lock);
+	if (cpu_hotplug_disabled)
+		err = -EBUSY;
+	else
+		err = _cpu_down(cpu);
+
 	mutex_unlock(&cpu_add_remove_lock);
 	return err;
 }
 #endif /*CONFIG_HOTPLUG_CPU*/
 
-int __devinit cpu_up(unsigned int cpu)
+/* Requires cpu_add_remove_lock to be held */
+static int __devinit _cpu_up(unsigned int cpu)
 {
 	int ret;
 	void *hcpu = (void *)(long)cpu;
 
-	mutex_lock(&cpu_add_remove_lock);
-	if (cpu_online(cpu) || !cpu_present(cpu)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (cpu_online(cpu) || !cpu_present(cpu))
+		return -EINVAL;
 
 	ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
@@ -219,7 +229,95 @@ out_notify:
 	if (ret != 0)
 		blocking_notifier_call_chain(&cpu_chain,
 				CPU_UP_CANCELED, hcpu);
+
+	return ret;
+}
+
+int __devinit cpu_up(unsigned int cpu)
+{
+	int err = 0;
+
+	mutex_lock(&cpu_add_remove_lock);
+	if (cpu_hotplug_disabled)
+		err = -EBUSY;
+	else
+		err = _cpu_up(cpu);
+
+	mutex_unlock(&cpu_add_remove_lock);
+	return err;
+}
+
+#ifdef CONFIG_SUSPEND_SMP
+static cpumask_t frozen_cpus;
+
+int disable_nonboot_cpus(void)
+{
+	int cpu, first_cpu, error;
+
+	mutex_lock(&cpu_add_remove_lock);
+	first_cpu = first_cpu(cpu_present_map);
+	if (!cpu_online(first_cpu)) {
+		error = _cpu_up(first_cpu);
+		if (error) {
+			printk(KERN_ERR "Could not bring CPU%d up.\n",
+				first_cpu);
+			goto out;
+		}
+	}
+	error = set_cpus_allowed(current, cpumask_of_cpu(first_cpu));
+	if (error) {
+		printk(KERN_ERR "Could not run on CPU%d\n", first_cpu);
+		goto out;
+	}
+	/* We take down all of the non-boot CPUs in one shot to avoid races
+	 * with the userspace trying to use the CPU hotplug at the same time
+	 */
+	cpus_clear(frozen_cpus);
+	printk("Disabling non-boot CPUs ...\n");
+	for_each_online_cpu(cpu) {
+		if (cpu == first_cpu)
+			continue;
+		error = _cpu_down(cpu);
+		if (!error) {
+			cpu_set(cpu, frozen_cpus);
+			printk("CPU%d is down\n", cpu);
+		} else {
+			printk(KERN_ERR "Error taking CPU%d down: %d\n",
+				cpu, error);
+			break;
+		}
+	}
+	if (!error) {
+		BUG_ON(num_online_cpus() > 1);
+		/* Make sure the CPUs won't be enabled by someone else */
+		cpu_hotplug_disabled = 1;
+	} else {
+		printk(KERN_ERR "Non-boot CPUs are not disabled");
+	}
 out:
 	mutex_unlock(&cpu_add_remove_lock);
-	return ret;
+	return error;
+}
+
+void enable_nonboot_cpus(void)
+{
+	int cpu, error;
+
+	/* Allow everyone to use the CPU hotplug again */
+	mutex_lock(&cpu_add_remove_lock);
+	cpu_hotplug_disabled = 0;
+	mutex_unlock(&cpu_add_remove_lock);
+
+	printk("Enabling non-boot CPUs ...\n");
+	for_each_cpu_mask(cpu, frozen_cpus) {
+		error = cpu_up(cpu);
+		if (!error) {
+			printk("CPU%d is up\n", cpu);
+			continue;
+		}
+		printk(KERN_WARNING "Error taking CPU%d up: %d\n",
+			cpu, error);
+	}
+	cpus_clear(frozen_cpus);
 }
+#endif
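
For context on how the two new helpers are meant to be used: a suspend path would bracket the low-power transition with them, so that all non-boot CPUs are offline and CPU hotplug is locked out for the duration. The sketch below is illustrative only and is not part of this patch; do_suspend_work() and suspend_with_nonboot_cpus_offline() are placeholder names, not real kernel functions. The behaviour the sketch relies on (cpu_up()/cpu_down() returning -EBUSY while cpu_hotplug_disabled is set, and frozen_cpus recording which CPUs to bring back) comes from the diff above.

/*
 * Illustrative sketch only (not part of this patch): typical use of
 * disable_nonboot_cpus()/enable_nonboot_cpus() on a suspend path.
 * do_suspend_work() is a placeholder, not a real kernel function.
 */
static int suspend_with_nonboot_cpus_offline(void)
{
	int error;

	/* Takes every CPU except the boot CPU down and sets
	 * cpu_hotplug_disabled, so concurrent cpu_up()/cpu_down()
	 * callers now get -EBUSY. */
	error = disable_nonboot_cpus();
	if (error)
		return error;

	error = do_suspend_work();	/* placeholder for the actual suspend step */

	/* Clears cpu_hotplug_disabled and brings the CPUs recorded
	 * in frozen_cpus back online. */
	enable_nonboot_cpus();
	return error;
}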