author	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-19 12:47:49 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-19 12:47:49 -0500
commit	aac3d39693529ca538e37ebdb6ed5d6432a697c7 (patch)
tree	bb1b0c9fe0538008aa2c97c6f5d9dfc2a4c34190 /kernel/cpu.c
parent	10e5453ffa0d04a2eda3cda3f55b88cb9c04595f (diff)
parent	077614ee1e93245a3b9a4e1213659405dbeb0ba6 (diff)
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
  sched: Fix broken assertion
  sched: Assert task state bits at build time
  sched: Update task_state_array with new states
  sched: Add missing state chars to TASK_STATE_TO_CHAR_STR
  sched: Move TASK_STATE_TO_CHAR_STR near the TASK_state bits
  sched: Teach might_sleep() about preemptible RCU
  sched: Make warning less noisy
  sched: Simplify set_task_cpu()
  sched: Remove the cfs_rq dependency from set_task_cpu()
  sched: Add pre and post wakeup hooks
  sched: Move kthread_bind() back to kthread.c
  sched: Fix select_task_rq() vs hotplug issues
  sched: Fix sched_exec() balancing
  sched: Ensure set_task_cpu() is never called on blocked tasks
  sched: Use TASK_WAKING for fork wakeups
  sched: Select_task_rq_fair() must honour SD_LOAD_BALANCE
  sched: Fix task_hot() test order
  sched: Fix set_cpu_active() in cpu_down()
  sched: Mark boot-cpu active before smp_init()
  sched: Fix cpu_clock() in NMIs, on !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  ...
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	24
1 file changed, 3 insertions(+), 21 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 291ac586f37f..1c8ddd6ee940 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -209,6 +209,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 		return -ENOMEM;
 
 	cpu_hotplug_begin();
+	set_cpu_active(cpu, false);
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
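
The first hunk is the core of the change: the dying CPU is now marked inactive inside _cpu_down() itself, under cpu_hotplug_begin() and before the CPU_DOWN_PREPARE notifiers run. For orientation, here is a condensed sketch of the resulting function head; only the lines shown in the hunk above are verbatim, while the locals and early guard checks are reconstructed from the 2.6.32-era function and may differ in detail:

static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	cpumask_var_t old_allowed;	/* reconstructed, not part of the hunk */
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (num_online_cpus() == 1)
		return -EBUSY;
	if (!cpu_online(cpu))
		return -EINVAL;
	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);	/* new: deactivate in exactly one place */
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		/* CPU_DOWN_FAILED notification and bail-out elided */
	}
	/* stop_machine()-based takedown and cleanup follow */
	return err;
}
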
@@ -280,18 +281,6 @@ int __ref cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	set_cpu_active(cpu, false);
-
-	/*
-	 * Make sure the all cpus did the reschedule and are not
-	 * using stale version of the cpu_active_mask.
-	 * This is not strictly necessary becuase stop_machine()
-	 * that we run down the line already provides the required
-	 * synchronization. But it's really a side effect and we do not
-	 * want to depend on the innards of the stop_machine here.
-	 */
-	synchronize_sched();
-
 	err = _cpu_down(cpu, 0);
 
 out:
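
The second hunk deletes the caller-side copy of that logic from cpu_down(). As the removed comment itself notes, the explicit synchronize_sched() was only there so that no CPU could still be looking at a stale cpu_active_mask, and was strictly redundant because the stop_machine() run later in the takedown already provides that synchronization. Reduced to its essence, the now-removed open-coded pattern was:

	set_cpu_active(cpu, false);
	synchronize_sched();	/* flush stale cpu_active_mask readers;
				 * redundant given the later stop_machine() */
	err = _cpu_down(cpu, 0);

With this merge, callers simply invoke _cpu_down() and let it do the deactivation.
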
@@ -382,19 +371,12 @@ int disable_nonboot_cpus(void)
 		return error;
 	cpu_maps_update_begin();
 	first_cpu = cpumask_first(cpu_online_mask);
-	/* We take down all of the non-boot CPUs in one shot to avoid races
+	/*
+	 * We take down all of the non-boot CPUs in one shot to avoid races
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
 	cpumask_clear(frozen_cpus);
 
-	for_each_online_cpu(cpu) {
-		if (cpu == first_cpu)
-			continue;
-		set_cpu_active(cpu, false);
-	}
-
-	synchronize_sched();
-
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
 		if (cpu == first_cpu)
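
After the third hunk, disable_nonboot_cpus() likewise stops pre-deactivating every non-boot CPU and waiting out a grace period; each CPU is deactivated by _cpu_down() as it is taken down. A sketch of the remaining takedown loop, assuming the 2.6.32-era body (the per-CPU error handling is reconstructed from memory and approximate):

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);	/* tasks already frozen for suspend */
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);	/* remember for resume */
		else
			break;
	}
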