author    Max Krasnyansky <maxk@qualcomm.com>    2008-05-29 14:17:01 -0400
committer Ingo Molnar <mingo@elte.hu>            2008-06-06 09:25:00 -0400
commit    5c8e1ed1d204a6770ca2854cd3b3597070fe7e5a (patch)
tree      72a2bb4394d1f0b8492ea9566990a39ca597b840
parent    1100ac91b6af02d8639d518fad5b434b1bf44ed6 (diff)
sched: CPU hotplug events must not destroy scheduler domains created by the cpusets
The first issue is not related to the cpusets: we are simply leaking doms_cur. It is allocated in arch_init_sched_domains(), which is called for every hotplug event, so we keep reallocating doms_cur without ever freeing it. I introduced a free_sched_domains() function that cleans things up.

The second issue is that the sched domains created by the cpusets are completely destroyed by CPU hotplug events. On every hotplug event the scheduler attaches all CPUs to the NULL domain and then puts them all back into a single default domain, thereby destroying the domains created by the cpusets (via partition_sched_domains()). The solution is simple: when cpusets are enabled the scheduler should not create the default domain and should instead let the cpusets do it, which is exactly what this patch does.

Signed-off-by: Max Krasnyansky <maxk@qualcomm.com>
Cc: pj@sgi.com
Cc: menage@google.com
Cc: rostedt@goodmis.org
Cc: mingo@elte.hu
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
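The leak is easiest to see next to the allocation side. Below is a simplified editorial sketch, not the verbatim kernel/sched.c of this era: it assumes the usual arch_init_sched_domains() pattern in which doms_cur is kmalloc()ed (falling back to fallback_doms) and handed to build_sched_domains(). Each hotplug event overwrites the previous doms_cur without freeing it; the free_sched_domains() helper (the one added by the patch below) releases it once the CPUs have been detached from their domains.

/*
 * Editorial sketch of the leak and its fix; the arch_init_sched_domains()
 * body here is an approximation, not the exact kernel source.
 */
static cpumask_t *doms_cur;		/* current sched-domain masks */
static int ndoms_cur;			/* number of entries in doms_cur */
static cpumask_t fallback_doms;		/* used when allocation fails */

static int arch_init_sched_domains(const cpumask_t *cpu_map)
{
	/* Pre-patch: the old doms_cur is overwritten and leaked here. */
	ndoms_cur = 1;
	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
	if (!doms_cur)
		doms_cur = &fallback_doms;
	cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
	return build_sched_domains(doms_cur);
}

/*
 * The fix added by this patch: release doms_cur after all CPUs have been
 * attached to the NULL domain, so the next rebuild starts from scratch.
 */
static void free_sched_domains(void)
{
	ndoms_cur = 0;
	if (doms_cur != &fallback_doms)
		kfree(doms_cur);
	doms_cur = &fallback_doms;
}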
-rw-r--r--	kernel/cpuset.c	 6
-rw-r--r--	kernel/sched.c	22
2 files changed, 28 insertions(+), 0 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 86ea9e34e326..6090d18b58a9 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1886,6 +1886,12 @@ static void common_cpu_mem_hotplug_unplug(void)
 	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 	scan_for_empty_cpusets(&top_cpuset);
 
+	/*
+	 * Scheduler destroys domains on hotplug events.
+	 * Rebuild them based on the current settings.
+	 */
+	rebuild_sched_domains();
+
 	cgroup_unlock();
 }
 
diff --git a/kernel/sched.c b/kernel/sched.c
index f0ed81b71282..1ddb0a8c7976 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7293,6 +7293,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7439,6 +7451,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7524,6 +7537,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7542,8 +7556,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
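Taken together, a hotplug event now follows two paths: the scheduler notifier update_sched_domains() detaches all CPUs from their domains, calls the new free_sched_domains() so doms_cur is no longer leaked, and rebuilds the default single-domain partitioning only when CONFIG_CPUSETS is disabled; with cpusets enabled, the cpuset hotplug callback common_cpu_mem_hotplug_unplug() calls the new rebuild_sched_domains(), which repartitions the scheduler domains (via partition_sched_domains()) according to the current cpuset configuration.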