Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 48 +++++++++++++++++++++++++++++++++++-----------
 1 file changed, 37 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index eaf6751e7612..3aaa5c8cb421 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1127,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1182,6 +1183,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -4396,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 			    signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			    fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
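
Review note: the do_wait_for_common() rewrite folds both early exits into
the loop condition and, via "timeout ?: 1" (GCC's two-operand conditional,
equivalent to "timeout ? timeout : 1"), guarantees a non-zero return
whenever the completion actually fired, even when it fired on the final
jiffy. A sketch of the caller-side contract this preserves (the device and
field names below are hypothetical):

	unsigned long left;

	left = wait_for_completion_timeout(&dev->cmd_done,
					   msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* x->done never became true */
	/* success: left >= 1 even when no wait time remained */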
@@ -6877,7 +6877,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7236,6 +7241,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
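
Review note: free_sched_domains() encodes the rule that doms_cur either
points at the static fallback_doms (never freed) or at kmalloc()ed memory
(freed exactly once, then reset). A minimal userspace analogue of that
ownership idiom, with illustrative names:

	#include <stdlib.h>

	static int fallback[1];		/* static storage, never freed */
	static int *cur = fallback;
	static int ncur;

	static void reset_to_fallback(void)
	{
		ncur = 0;
		if (cur != fallback)	/* only heap memory may be freed */
			free(cur);
		cur = fallback;		/* idempotent: safe to call twice */
	}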
@@ -7382,6 +7399,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7467,6 +7485,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7485,8 +7504,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
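
Review note: with CONFIG_CPUSETS=y the default rebuild is skipped because
the cpuset code is expected to recompute the partition and push it to the
scheduler itself. A sketch of that path, assuming the 2.6.26-era API
(ndoms/doms/dattr are illustrative locals, not the actual cpuset code):

	/* cpuset side: hand the recomputed partition to the scheduler;
	 * doms is kmalloc()ed and ownership passes to the scheduler. */
	partition_sched_domains(ndoms, doms, dattr);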
@@ -7626,7 +7653,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
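
Review note: the deleted assignment unconditionally overwrote the value
chosen just above it, so nested rt entities always ended up on the root
rt_rq. With it gone, the selection reduces to the intended form:

	if (!parent)
		rt_se->rt_rq = &rq->rt;		/* top level: root rt_rq */
	else
		rt_se->rt_rq = parent->my_q;	/* nested: parent's queue */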
@@ -8348,7 +8374,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {
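
Review note: the "tg ? tg->parent : NULL" guard lets __rt_schedulable() be
called with a NULL task group, in which case parent is NULL and the global
bandwidth branch below is taken. A hedged sketch of such a caller (modelled
on the 2.6.26-era sched_rt_global_constraints(); return polarity assumed):

	if (!__rt_schedulable(NULL, 1, 0))
		return -EINVAL;	/* global rt bandwidth would overcommit */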