path: root/kernel/sched.c
author    Ingo Molnar <mingo@elte.hu>  2008-06-25 06:28:47 -0400
committer Ingo Molnar <mingo@elte.hu>  2008-06-25 06:28:47 -0400
commit    f4628e644c34d9e6242ea18487b2ed58ee04e3eb (patch)
tree      3e1cd355fb6959d5b9f569342e4533a48e53aa11 /kernel/sched.c
parent    cb9aa97c21c59ad01c9514d7faf45dc166fba226 (diff)
parent    543cf4cb3fe6f6cae3651ba918b9c56200b257d0 (diff)
Merge branch 'linus' into tracing/mmiotrace-mergefixups
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  48
1 file changed, 37 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c994d12abbf6..2a7ad35ea79b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1146,6 +1146,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1201,6 +1202,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -4447,22 +4449,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 			    signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			     fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
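The rewrite funnels every exit path through the single __remove_wait_queue() after the loop, and "return timeout ?: 1" (GNU C's "a ?: b", shorthand for "a ? a : b") guarantees a non-zero return when the completion arrives just as the timeout drains to zero, so callers no longer mistake that success for a timeout. A minimal userspace model of the resulting control flow; "signaled" and the decrement are hypothetical stand-ins for signal_pending() and schedule_timeout(), not kernel API:

#include <stdio.h>

#define ERESTARTSYS 512
struct completion { int done; };

/* Model of the patched do_wait_for_common() control flow. */
static long model_wait(struct completion *x, long timeout, int signaled)
{
	if (!x->done) {
		do {
			if (signaled) {
				timeout = -ERESTARTSYS;	/* record the error... */
				break;			/* ...and fall through */
			}
			timeout -= 1;	/* stand-in for schedule_timeout() */
		} while (!x->done && timeout);
		/* the single __remove_wait_queue() would run here */
		if (!x->done)
			return timeout;	/* 0 on timeout, <0 on signal */
	}
	x->done--;
	return timeout ? timeout : 1;	/* `timeout ?: 1`: success is never 0 */
}

int main(void)
{
	struct completion pending = { .done = 0 }, ready = { .done = 1 };

	printf("timeout: %ld\n", model_wait(&pending, 3, 0));	/* 0 */
	printf("signal:  %ld\n", model_wait(&pending, 3, 1));	/* -512 */
	printf("done:    %ld\n", model_wait(&ready, 3, 0));	/* 3 */
	return 0;
}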
@@ -6928,7 +6928,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
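Range-checking the parsed value before it reaches default_relax_domain_level means a bogus relax_domain_level= boot parameter can no longer install an out-of-range level; because val is unsigned long, a negative argument wraps to a huge value and fails the same val < SD_LV_MAX test. A userspace sketch of the parse-then-bound idiom, with strtoul standing in for simple_strtoul and an illustrative bound (the real one is defined elsewhere in the kernel, in enum sched_domain_level):

#include <stdio.h>
#include <stdlib.h>

#define SD_LV_MAX 6	/* illustrative value for this sketch only */

static int default_relax_domain_level = -1;

static void setup_relax_domain_level(const char *str)
{
	unsigned long val = strtoul(str, NULL, 0);

	if (val < SD_LV_MAX)	/* "-1" wraps to ULONG_MAX and is rejected */
		default_relax_domain_level = (int)val;
}

int main(void)
{
	setup_relax_domain_level("2");
	printf("level = %d\n", default_relax_domain_level);	/* 2 */
	setup_relax_domain_level("-1");
	printf("level = %d\n", default_relax_domain_level);	/* still 2 */
	return 0;
}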
@@ -7287,6 +7292,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
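doms_cur either points at the statically allocated fallback_doms or at heap memory installed by an earlier domain rebuild; the new helper frees only the latter and parks the pointer back on the fallback, so repeated detach/rebuild cycles (the two call sites added below) neither leak nor double-free. The ownership rule in miniature, with cpumask_t reduced to a scalar and free() standing in for kfree():

#include <stdlib.h>

typedef unsigned long cpumask_t;	/* stand-in for the kernel type */

static cpumask_t fallback_doms;
static cpumask_t *doms_cur = &fallback_doms;
static int ndoms_cur;

static void free_sched_domains(void)
{
	ndoms_cur = 0;
	if (doms_cur != &fallback_doms)	/* only free what was allocated */
		free(doms_cur);
	doms_cur = &fallback_doms;	/* never left dangling */
}

int main(void)
{
	/* adopt a heap-allocated partition, then tear it down twice:
	 * the second call is a safe no-op on the fallback */
	doms_cur = malloc(4 * sizeof(*doms_cur));
	ndoms_cur = 4;
	free_sched_domains();
	free_sched_domains();
	return 0;
}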
@@ -7433,6 +7450,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7518,6 +7536,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7536,8 +7555,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7677,7 +7704,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
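The deleted assignment unconditionally pointed every rt entity at the root runqueue, clobbering the parent-dependent choice made by the if/else just above it and flattening the group hierarchy. The bug pattern in miniature, with hypothetical types:

#include <assert.h>

struct rt_rq { int id; };
struct rt_entity { struct rt_rq *rt_rq; };

static void init_entity(struct rt_entity *se, struct rt_rq *root,
			struct rt_rq *parent_q)
{
	if (!parent_q)
		se->rt_rq = root;
	else
		se->rt_rq = parent_q;
	/* se->rt_rq = root;	<- the removed line: it overwrote the
	 *			   parent queue chosen just above */
}

int main(void)
{
	struct rt_rq root = { 0 }, parent_q = { 1 };
	struct rt_entity se;

	init_entity(&se, &root, &parent_q);
	assert(se.rt_rq == &parent_q);	/* holds only with the line removed */
	return 0;
}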
@@ -8399,7 +8425,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {
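With this change __rt_schedulable() tolerates being called with tg == NULL, presumably for a group-less global bandwidth check, so the parent lookup guards the dereference instead of crashing. The same NULL-tolerant accessor pattern, reduced to essentials with a hypothetical mirror type:

#include <assert.h>
#include <stddef.h>

struct task_group_model {	/* hypothetical, not the kernel struct */
	struct task_group_model *parent;
};

static struct task_group_model *parent_of(struct task_group_model *tg)
{
	return tg ? tg->parent : NULL;	/* NULL tg means the global case */
}

int main(void)
{
	struct task_group_model root = { .parent = NULL };
	struct task_group_model child = { .parent = &root };

	assert(parent_of(&child) == &root);
	assert(parent_of(NULL) == NULL);	/* no dereference, no crash */
	return 0;
}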