Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	52
1 file changed, 41 insertions, 11 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index eaf6751e7612..94ead43eda62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1127,6 +1127,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+#ifdef CONFIG_SMP
 static void hotplug_hrtick_disable(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -1182,6 +1183,7 @@ static void init_hrtick(void)
 {
 	hotcpu_notifier(hotplug_hrtick, 0);
 }
+#endif /* CONFIG_SMP */
 
 static void init_rq_hrtick(struct rq *rq)
 {
@@ -4396,22 +4398,20 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 			     signal_pending(current)) ||
 			    (state == TASK_KILLABLE &&
 			     fatal_signal_pending(current))) {
-				__remove_wait_queue(&x->wait, &wait);
-				return -ERESTARTSYS;
+				timeout = -ERESTARTSYS;
+				break;
 			}
 			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				return timeout;
-			}
-		} while (!x->done);
+		} while (!x->done && timeout);
 		__remove_wait_queue(&x->wait, &wait);
+		if (!x->done)
+			return timeout;
 	}
 	x->done--;
-	return timeout;
+	return timeout ?: 1;
 }
 
 static long __sched
@@ -5887,6 +5887,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 		next = pick_next_task(rq, rq->curr);
 		if (!next)
 			break;
+		next->sched_class->put_prev_task(rq, next);
 		migrate_dead(dead_cpu, next);
 
 	}
@@ -6877,7 +6878,12 @@ static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
 {
-	default_relax_domain_level = simple_strtoul(str, NULL, 0);
+	unsigned long val;
+
+	val = simple_strtoul(str, NULL, 0);
+	if (val < SD_LV_MAX)
+		default_relax_domain_level = val;
+
 	return 1;
 }
 __setup("relax_domain_level=", setup_relax_domain_level);
@@ -7236,6 +7242,18 @@ void __attribute__((weak)) arch_update_cpu_topology(void)
 }
 
 /*
+ * Free current domain masks.
+ * Called after all cpus are attached to NULL domain.
+ */
+static void free_sched_domains(void)
+{
+	ndoms_cur = 0;
+	if (doms_cur != &fallback_doms)
+		kfree(doms_cur);
+	doms_cur = &fallback_doms;
+}
+
+/*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
  * exclude other special cases in the future.
@@ -7382,6 +7400,7 @@ int arch_reinit_sched_domains(void)
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	detach_destroy_domains(&cpu_online_map);
+	free_sched_domains();
 	err = arch_init_sched_domains(&cpu_online_map);
 	mutex_unlock(&sched_domains_mutex);
 	put_online_cpus();
@@ -7467,6 +7486,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		detach_destroy_domains(&cpu_online_map);
+		free_sched_domains();
 		return NOTIFY_OK;
 
 	case CPU_UP_CANCELED:
@@ -7485,8 +7505,16 @@ static int update_sched_domains(struct notifier_block *nfb,
 		return NOTIFY_DONE;
 	}
 
+#ifndef CONFIG_CPUSETS
+	/*
+	 * Create default domain partitioning if cpusets are disabled.
+	 * Otherwise we let cpusets rebuild the domains based on the
+	 * current setup.
+	 */
+
 	/* The hotplug lock is already held by cpu_up/cpu_down */
 	arch_init_sched_domains(&cpu_online_map);
+#endif
 
 	return NOTIFY_OK;
 }
@@ -7626,7 +7654,6 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 	else
 		rt_se->rt_rq = parent->my_q;
 
-	rt_se->rt_rq = &rq->rt;
 	rt_se->my_q = rt_rq;
 	rt_se->parent = parent;
 	INIT_LIST_HEAD(&rt_se->run_list);
@@ -8348,7 +8375,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_CGROUP_SCHED
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 {
-	struct task_group *tgi, *parent = tg->parent;
+	struct task_group *tgi, *parent = tg ? tg->parent : NULL;
 	unsigned long total = 0;
 
 	if (!parent) {
@@ -8475,6 +8502,9 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
 	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+	if (rt_period == 0)
+		return -EINVAL;
+
 	return tg_set_bandwidth(tg, rt_period, rt_runtime);
 }
 