about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
author: Heiko Carstens <heiko.carstens@de.ibm.com> 2008-04-28 05:33:07 -0400
committer: Ingo Molnar <mingo@elte.hu> 2008-05-05 17:56:18 -0400
commit: 712555ee4f873515612f89554ad1a3fda5fa887e (patch)
tree: 88ff7222727c63959eb58a3aa729029aaf08a64f /kernel/sched.c
parent: 690229a0912ca2fef8b542fe4d8b73acfcdc6e24 (diff)
sched: fix missing locking in sched_domains code
Concurrent calls to detach_destroy_domains and arch_init_sched_domains were prevented by the old scheduler subsystem cpu hotplug mutex. When this got converted to get_online_cpus() the locking got broken. Unlike before now several processes can concurrently enter the critical sections that were protected by the old lock. So use the already present doms_cur_mutex to protect these sections again. Cc: Gautham R Shenoy <ego@in.ibm.com> Cc: Paul Jackson <pj@sgi.com> Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- kernel/sched.c | 29
1 file changed, 12 insertions, 17 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8f433fedfcb3..561b3b39bdb8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -242,6 +242,12 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
242} 242}
243#endif 243#endif
244 244
245/*
246 * sched_domains_mutex serializes calls to arch_init_sched_domains,
247 * detach_destroy_domains and partition_sched_domains.
248 */
249static DEFINE_MUTEX(sched_domains_mutex);
250
245#ifdef CONFIG_GROUP_SCHED 251#ifdef CONFIG_GROUP_SCHED
246 252
247#include <linux/cgroup.h> 253#include <linux/cgroup.h>
@@ -308,9 +314,6 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
308 */ 314 */
309static DEFINE_SPINLOCK(task_group_lock); 315static DEFINE_SPINLOCK(task_group_lock);
310 316
311/* doms_cur_mutex serializes access to doms_cur[] array */
312static DEFINE_MUTEX(doms_cur_mutex);
313
314#ifdef CONFIG_FAIR_GROUP_SCHED 317#ifdef CONFIG_FAIR_GROUP_SCHED
315#ifdef CONFIG_USER_SCHED 318#ifdef CONFIG_USER_SCHED
316# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) 319# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@ -358,21 +361,9 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
358#endif 361#endif
359} 362}
360 363
361static inline void lock_doms_cur(void)
362{
363 mutex_lock(&doms_cur_mutex);
364}
365
366static inline void unlock_doms_cur(void)
367{
368 mutex_unlock(&doms_cur_mutex);
369}
370
371#else 364#else
372 365
373static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 366static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
374static inline void lock_doms_cur(void) { }
375static inline void unlock_doms_cur(void) { }
376 367
377#endif /* CONFIG_GROUP_SCHED */ 368#endif /* CONFIG_GROUP_SCHED */
378 369
@@ -7822,7 +7813,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
7822{ 7813{
7823 int i, j; 7814 int i, j;
7824 7815
7825 lock_doms_cur(); 7816 mutex_lock(&sched_domains_mutex);
7826 7817
7827 /* always unregister in case we don't destroy any domains */ 7818 /* always unregister in case we don't destroy any domains */
7828 unregister_sched_domain_sysctl(); 7819 unregister_sched_domain_sysctl();
@@ -7871,7 +7862,7 @@ match2:
7871 7862
7872 register_sched_domain_sysctl(); 7863 register_sched_domain_sysctl();
7873 7864
7874 unlock_doms_cur(); 7865 mutex_unlock(&sched_domains_mutex);
7875} 7866}
7876 7867
7877#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 7868#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -7880,8 +7871,10 @@ int arch_reinit_sched_domains(void)
7880 int err; 7871 int err;
7881 7872
7882 get_online_cpus(); 7873 get_online_cpus();
7874 mutex_lock(&sched_domains_mutex);
7883 detach_destroy_domains(&cpu_online_map); 7875 detach_destroy_domains(&cpu_online_map);
7884 err = arch_init_sched_domains(&cpu_online_map); 7876 err = arch_init_sched_domains(&cpu_online_map);
7877 mutex_unlock(&sched_domains_mutex);
7885 put_online_cpus(); 7878 put_online_cpus();
7886 7879
7887 return err; 7880 return err;
@@ -7999,10 +7992,12 @@ void __init sched_init_smp(void)
7999 BUG_ON(sched_group_nodes_bycpu == NULL); 7992 BUG_ON(sched_group_nodes_bycpu == NULL);
8000#endif 7993#endif
8001 get_online_cpus(); 7994 get_online_cpus();
7995 mutex_lock(&sched_domains_mutex);
8002 arch_init_sched_domains(&cpu_online_map); 7996 arch_init_sched_domains(&cpu_online_map);
8003 cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); 7997 cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
8004 if (cpus_empty(non_isolated_cpus)) 7998 if (cpus_empty(non_isolated_cpus))
8005 cpu_set(smp_processor_id(), non_isolated_cpus); 7999 cpu_set(smp_processor_id(), non_isolated_cpus);
8000 mutex_unlock(&sched_domains_mutex);
8006 put_online_cpus(); 8001 put_online_cpus();
8007 /* XXX: Theoretical race here - CPU may be hotplugged now */ 8002 /* XXX: Theoretical race here - CPU may be hotplugged now */
8008 hotcpu_notifier(update_sched_domains, 0); 8003 hotcpu_notifier(update_sched_domains, 0);