Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   57
1 file changed, 35 insertions(+), 22 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 186c6fd08acf..faa63b99217a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
 	ktime_t now;
 
-	if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return;
 
 	if (hrtimer_active(&rt_b->rt_period_timer))
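
[Note: the condition flip above changes when the bandwidth period timer gets armed. The old test only returned early when throttling was enabled *and* the runtime was infinite, so a disabled-bandwidth setup with a finite runtime would still start the timer. A minimal userspace sketch of the two predicates (stand-in values, not the kernel's definitions) shows where they diverge:]

    /* Illustrative only: RUNTIME_INF and both guards mimic, not reuse, the kernel's. */
    #include <stdio.h>
    #include <stdbool.h>

    #define RUNTIME_INF (~0ULL)

    static bool old_guard(bool enabled, unsigned long long runtime)
    {
    	return enabled && runtime == RUNTIME_INF;	/* misses the disabled case */
    }

    static bool new_guard(bool enabled, unsigned long long runtime)
    {
    	return !enabled || runtime == RUNTIME_INF;	/* also bails out when disabled */
    }

    int main(void)
    {
    	/* bandwidth disabled, finite runtime: only the new guard skips the timer */
    	printf("old=%d new=%d\n",
    	       old_guard(false, 950000ULL), new_guard(false, 950000ULL));
    	return 0;
    }
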
@@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
-	if (!sync) {
-		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
-		    p->se.avg_overlap < sysctl_sched_migration_cost)
-			sync = 1;
-	} else {
-		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
-		    p->se.avg_overlap >= sysctl_sched_migration_cost)
-			sync = 0;
-	}
-
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
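
[Note: the deleted block was the avg_overlap wakeup heuristic: it promoted a wakeup to sync when both waker and wakee had a short average run overlap, and demoted it when either exceeded sysctl_sched_migration_cost. For reference, the removed decision reads roughly like this standalone restatement (plain integer arguments, not the kernel source):]

    /* Restatement of the removed heuristic, for reading alongside the hunk. */
    #include <stdio.h>

    static int adjust_sync(int sync, unsigned long curr_overlap,
    		       unsigned long wakee_overlap, unsigned long cost)
    {
    	if (!sync) {
    		if (curr_overlap < cost && wakee_overlap < cost)
    			sync = 1;	/* both tasks look synchronous */
    	} else {
    		if (curr_overlap >= cost || wakee_overlap >= cost)
    			sync = 0;	/* either task looks independent */
    	}
    	return sync;
    }

    int main(void)
    {
    	/* both overlaps under the cost threshold: promoted to sync */
    	printf("%d\n", adjust_sync(0, 100, 200, 500000));	/* -> 1 */
    	return 0;
    }
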
@@ -3890,19 +3880,24 @@ int select_nohz_load_balancer(int stop_tick)
 	int cpu = smp_processor_id();
 
 	if (stop_tick) {
-		cpumask_set_cpu(cpu, nohz.cpu_mask);
 		cpu_rq(cpu)->in_nohz_recently = 1;
 
-		/*
-		 * If we are going offline and still the leader, give up!
-		 */
-		if (!cpu_active(cpu) &&
-		    atomic_read(&nohz.load_balancer) == cpu) {
+		if (!cpu_active(cpu)) {
+			if (atomic_read(&nohz.load_balancer) != cpu)
+				return 0;
+
+			/*
+			 * If we are going offline and still the leader,
+			 * give up!
+			 */
 			if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
 				BUG();
+
 			return 0;
 		}
 
+		cpumask_set_cpu(cpu, nohz.cpu_mask);
+
 		/* time for ilb owner also to sleep */
 		if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
 			if (atomic_read(&nohz.load_balancer) == cpu)
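
[Note: the reordering is the point of this hunk. Previously the CPU was added to nohz.cpu_mask before the offline check, so a CPU on its way down could be left visible to the idle load balancer. The new flow handles the !cpu_active() case first, resigning ilb ownership via the cmpxchg if this CPU still holds it, and only then publishes the CPU in the mask. A compressed sketch of that control flow, with C11 atomics standing in for the kernel's primitives (enter_nohz and its arguments are hypothetical names, not kernel API):]

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int load_balancer = -1;	/* owning cpu, or -1 */

    static int enter_nohz(int cpu, bool active, unsigned long *cpu_mask)
    {
    	if (!active) {
    		int self = cpu;
    		/* going offline: give up ilb ownership if we still hold it */
    		atomic_compare_exchange_strong(&load_balancer, &self, -1);
    		return 0;		/* and never enter the shared mask */
    	}
    	*cpu_mask |= 1UL << cpu;	/* publish only after the active check */
    	return 1;
    }

    int main(void)
    {
    	unsigned long mask = 0;

    	atomic_store(&load_balancer, 1);
    	enter_nohz(1, false, &mask);	/* offlining cpu resigns, mask untouched */
    	enter_nohz(2, true, &mask);	/* active cpu gets published */
    	return 0;
    }
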
@@ -7014,20 +7009,26 @@ static void free_rootdomain(struct root_domain *rd)
 
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
+	struct root_domain *old_rd = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&rq->lock, flags);
 
 	if (rq->rd) {
-		struct root_domain *old_rd = rq->rd;
+		old_rd = rq->rd;
 
 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
 			set_rq_offline(rq);
 
 		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
-		if (atomic_dec_and_test(&old_rd->refcount))
-			free_rootdomain(old_rd);
+		/*
+		 * If we don't want to free the old_rd yet then
+		 * set old_rd to NULL to skip the freeing later
+		 * in this function:
+		 */
+		if (!atomic_dec_and_test(&old_rd->refcount))
+			old_rd = NULL;
 	}
 
 	atomic_inc(&rd->refcount);
@@ -7038,6 +7039,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
+
+	if (old_rd)
+		free_rootdomain(old_rd);
 }
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
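
[Note: taken together, the two rq_attach_root() hunks implement one change: the refcount is still dropped under rq->lock, but the actual free_rootdomain() call is deferred until after spin_unlock_irqrestore(), keeping the destructor out of the critical section. The pattern in miniature, with a pthread mutex standing in for rq->lock and illustrative names throughout:]

    #include <pthread.h>
    #include <stdlib.h>

    struct root_domain { int refcount; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void reattach(struct root_domain **slot, struct root_domain *new_rd)
    {
    	struct root_domain *old_rd = NULL;

    	pthread_mutex_lock(&lock);
    	if (*slot) {
    		old_rd = *slot;
    		if (--old_rd->refcount != 0)
    			old_rd = NULL;	/* still referenced: skip the free below */
    	}
    	new_rd->refcount++;
    	*slot = new_rd;
    	pthread_mutex_unlock(&lock);

    	if (old_rd)
    		free(old_rd);		/* deferred until the lock is dropped */
    }

    int main(void)
    {
    	struct root_domain *first = malloc(sizeof(*first));
    	struct root_domain next = { .refcount = 0 };
    	struct root_domain *slot = first;

    	first->refcount = 1;
    	reattach(&slot, &next);		/* last reference: first is freed, after unlock */
    	return 0;
    }
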
@@ -9285,6 +9289,16 @@ static int sched_rt_global_constraints(void)
 
 	return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+	/* Don't accept realtime tasks when there is no way for them to run */
+	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+		return 0;
+
+	return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
@@ -9378,8 +9392,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		      struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-	/* Don't accept realtime tasks when there is no way for them to run */
-	if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
 		return -EINVAL;
 #else
 	/* We don't support RT-tasks being in separate groups */
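
[Note: the last two hunks are a refactor: the RT-runtime admission test that cpu_cgroup_can_attach() used to open-code is pulled out into sched_rt_can_attach(), so other call sites can reuse the same policy. A userspace analog of the shape of that refactor — the struct fields and both function names below are stand-ins, not the kernel's:]

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct task_group { unsigned long long rt_runtime; };
    struct task { bool rt; };

    /* mirrors sched_rt_can_attach(): refuse RT tasks with no runtime budget */
    static int rt_can_attach(const struct task_group *tg, const struct task *p)
    {
    	return !(p->rt && tg->rt_runtime == 0);
    }

    /* the cgroup callback now just delegates to the shared predicate */
    static int can_attach(const struct task_group *tg, const struct task *p)
    {
    	return rt_can_attach(tg, p) ? 0 : -EINVAL;
    }

    int main(void)
    {
    	struct task_group tg = { .rt_runtime = 0 };
    	struct task p = { .rt = true };

    	printf("can_attach: %d\n", can_attach(&tg, &p));	/* prints -22 */
    	return 0;
    }
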