aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2006-10-03 04:14:04 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-10-03 11:04:06 -0400
commit5c1e176781f43bc902a51e5832f789756bff911b (patch)
treed28ad2b40830eec152d94030b2f75777f26dd869
parent2c136efcf6f58d07512c4df83eb494597fe0d229 (diff)
[PATCH] sched: force /sbin/init off isolated cpus
Force /sbin/init off isolated cpus (unless every CPU is specified as an isolcpu). Users seem to think that the isolated CPUs shouldn't have much running on them to begin with. That's fair enough: intuitive, I guess. It also means that the cpu affinity masks of tasks will not include isolcpus by default, which is also more intuitive, perhaps. /sbin/init is spawned from the boot CPU's idle thread, and /sbin/init starts the rest of userspace. So if the boot CPU is specified to be an isolcpu, then prior to this patch, all of userspace will be run there. (throw in a couple of plausible devinit -> cpuinit conversions I spotted while we're here). Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Dimitri Sivanich <sivanich@sgi.com> Acked-by: Paul Jackson <pj@sgi.com> Acked-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--kernel/sched.c13
1 files changed, 11 insertions, 2 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index e4e54e86f4a2..ddf418810c39 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4817,7 +4817,7 @@ void show_state(void)
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void __devinit init_idle(struct task_struct *idle, int cpu)
+void __cpuinit init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -5461,7 +5461,7 @@ static void cpu_attach_domain(struct sched_domain *sd, int cpu)
 }
 
 /* cpus with isolated domains */
-static cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
+static cpumask_t __cpuinitdata cpu_isolated_map = CPU_MASK_NONE;
 
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
@@ -6747,11 +6747,20 @@ static int update_sched_domains(struct notifier_block *nfb,
 
 void __init sched_init_smp(void)
 {
+	cpumask_t non_isolated_cpus;
+
 	lock_cpu_hotplug();
 	arch_init_sched_domains(&cpu_online_map);
+	cpus_andnot(non_isolated_cpus, cpu_online_map, cpu_isolated_map);
+	if (cpus_empty(non_isolated_cpus))
+		cpu_set(smp_processor_id(), non_isolated_cpus);
 	unlock_cpu_hotplug();
 	/* XXX: Theoretical race here - CPU may be hotplugged now */
 	hotcpu_notifier(update_sched_domains, 0);
+
+	/* Move init over to a non-isolated CPU */
+	if (set_cpus_allowed(current, non_isolated_cpus) < 0)
+		BUG();
 }
 #else
 void __init sched_init_smp(void)