author	Ingo Molnar <mingo@elte.hu>	2008-06-23 04:52:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-23 04:52:59 -0400
commit	a60b33cf59d1c9e0e363287fce799cb23d45660c (patch)
tree	85eb8feea5717cf472a2549e53f59183a95617c5 /kernel/sched.c
parent	0f476b6d91a1395bda6464e653ce66ea9bea7167 (diff)
parent	481c5346d0981940ee63037eb53e4e37b0735c10 (diff)
Merge branch 'linus' into core/softirq
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	34
1 file changed, 31 insertions, 3 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 7c7b2d052c02..33680bc17cf4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1127,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1182,6 +1183,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -6877,7 +6879,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7236,6 +7243,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7382,6 +7401,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7467,6 +7487,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7485,8 +7506,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7626,7 +7655,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8348,7 +8376,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {