Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  57
1 file changed, 35 insertions, 22 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8ee437a5ec1d..8e2558c2ba67 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
         ktime_t now;
 
-        if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                 return;
 
         if (hrtimer_active(&rt_b->rt_period_timer))
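Note on the hunk above: the old guard returned early only when RT bandwidth was enabled and the runtime was unlimited, so with bandwidth globally disabled the function fell through and armed the period timer anyway. The corrected guard skips the timer whenever throttling is off or the runtime is infinite. A minimal userspace sketch of the two predicates, with RUNTIME_INF and the sample runtime as stand-ins for the kernel's definitions:

/* Userspace sketch, not kernel code: compares the old and new
 * early-return guards of start_rt_bandwidth(). */
#include <stdbool.h>
#include <stdio.h>

#define RUNTIME_INF (~0ULL)     /* stand-in for the kernel's sentinel */

static bool old_guard(bool enabled, unsigned long long runtime)
{
        return enabled && runtime == RUNTIME_INF;  /* misses the disabled case */
}

static bool new_guard(bool enabled, unsigned long long runtime)
{
        return !enabled || runtime == RUNTIME_INF; /* also skips when disabled */
}

int main(void)
{
        /* Bandwidth disabled, finite runtime: the old guard would still
         * arm the period timer (prints 0), the new one bails out (prints 1). */
        printf("old=%d new=%d\n",
               old_guard(false, 950000), new_guard(false, 950000));
        return 0;
}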
@@ -2266,16 +2266,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
         if (!sched_feat(SYNC_WAKEUPS))
                 sync = 0;
 
-        if (!sync) {
-                if (current->se.avg_overlap < sysctl_sched_migration_cost &&
-                          p->se.avg_overlap < sysctl_sched_migration_cost)
-                        sync = 1;
-        } else {
-                if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
-                          p->se.avg_overlap >= sysctl_sched_migration_cost)
-                        sync = 0;
-        }
-
 #ifdef CONFIG_SMP
         if (sched_feat(LB_WAKEUP_UPDATE)) {
                 struct sched_domain *sd;
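The block deleted above was a heuristic that overrode the caller-supplied sync hint in try_to_wake_up() based on how much the waker and wakee historically overlapped; removing it makes the function trust the caller's hint again (unless SYNC_WAKEUPS is off). For reference, a userspace model of the removed logic, with plain integers standing in for the scheduler's types and units:

/* Userspace model of the removed heuristic: promote a non-sync wakeup
 * to sync (or demote a sync one) by comparing the tasks' average
 * overlap against the migration cost. All values are placeholders. */
#include <stdio.h>

static int adjust_sync(int sync, unsigned long waker_overlap,
                       unsigned long wakee_overlap, unsigned long migration_cost)
{
        if (!sync) {
                if (waker_overlap < migration_cost &&
                    wakee_overlap < migration_cost)
                        sync = 1;
        } else {
                if (waker_overlap >= migration_cost ||
                    wakee_overlap >= migration_cost)
                        sync = 0;
        }
        return sync;
}

int main(void)
{
        /* Two short-overlap tasks: the heuristic forced sync = 1 even
         * when the caller passed 0, which is what this revert undoes. */
        printf("sync = %d\n", adjust_sync(0, 100000, 200000, 500000));
        return 0;
}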
@@ -3890,19 +3880,24 @@ int select_nohz_load_balancer(int stop_tick)
         int cpu = smp_processor_id();
 
         if (stop_tick) {
-                cpumask_set_cpu(cpu, nohz.cpu_mask);
                 cpu_rq(cpu)->in_nohz_recently = 1;
 
-                /*
-                 * If we are going offline and still the leader, give up!
-                 */
-                if (!cpu_active(cpu) &&
-                    atomic_read(&nohz.load_balancer) == cpu) {
+                if (!cpu_active(cpu)) {
+                        if (atomic_read(&nohz.load_balancer) != cpu)
+                                return 0;
+
+                        /*
+                         * If we are going offline and still the leader,
+                         * give up!
+                         */
                         if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
                                 BUG();
+
                         return 0;
                 }
 
+                cpumask_set_cpu(cpu, nohz.cpu_mask);
+
                 /* time for ilb owner also to sleep */
                 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
                         if (atomic_read(&nohz.load_balancer) == cpu)
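The reordering above matters for CPU offline: previously the CPU was added to nohz.cpu_mask before the offline check, so a dying CPU could be left set in the mask while it gave up idle-load-balancer (ilb) ownership. Now an inactive CPU bails out first, and only active CPUs ever join the mask. A simplified userspace model of the new control flow, with a plain bitmask and C11 atomics standing in for the kernel's nohz state:

/* Userspace sketch of the reordered flow in select_nohz_load_balancer(). */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int load_balancer = -1;   /* models nohz.load_balancer */

/* Returns nonzero if the CPU entered nohz mode. */
static int enter_nohz(int cpu, bool active, unsigned long *cpu_mask)
{
        if (!active) {
                /* Going offline: never join the mask. */
                if (atomic_load(&load_balancer) != cpu)
                        return 0;

                /* Still the ilb leader: hand off ownership, then bail.
                 * The kernel BUG()s if the cmpxchg loses this race. */
                int expected = cpu;
                atomic_compare_exchange_strong(&load_balancer, &expected, -1);
                return 0;
        }

        *cpu_mask |= 1UL << cpu;        /* only active CPUs are marked */
        return 1;
}

int main(void)
{
        unsigned long mask = 0;

        enter_nohz(1, true, &mask);     /* active CPU joins the mask */
        enter_nohz(2, false, &mask);    /* offline CPU is kept out */
        return mask == (1UL << 1) ? 0 : 1;
}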
@@ -6949,20 +6944,26 @@ static void free_rootdomain(struct root_domain *rd)
 
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
+        struct root_domain *old_rd = NULL;
         unsigned long flags;
 
         spin_lock_irqsave(&rq->lock, flags);
 
         if (rq->rd) {
-                struct root_domain *old_rd = rq->rd;
+                old_rd = rq->rd;
 
                 if (cpumask_test_cpu(rq->cpu, old_rd->online))
                         set_rq_offline(rq);
 
                 cpumask_clear_cpu(rq->cpu, old_rd->span);
 
-                if (atomic_dec_and_test(&old_rd->refcount))
-                        free_rootdomain(old_rd);
+                /*
+                 * If we don't want to free the old_rd yet then
+                 * set old_rd to NULL to skip the freeing later
+                 * in this function:
+                 */
+                if (!atomic_dec_and_test(&old_rd->refcount))
+                        old_rd = NULL;
         }
 
         atomic_inc(&rd->refcount);
@@ -6973,6 +6974,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
                 set_rq_online(rq);
 
         spin_unlock_irqrestore(&rq->lock, flags);
+
+        if (old_rd)
+                free_rootdomain(old_rd);
 }
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
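The point of threading old_rd through rq_attach_root() in the two hunks above is that free_rootdomain() is no longer called with rq->lock held and interrupts disabled: the drop of the last reference is still detected under the lock, but the actual free is deferred until after spin_unlock_irqrestore(). The same lock-then-defer-free pattern in plain pthreads (names and types are illustrative, not the kernel's):

/* Userspace model of the deferred-free pattern. */
#include <pthread.h>
#include <stdlib.h>

struct rootdom { int refcount; };

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;

static void attach(struct rootdom **cur, struct rootdom *next)
{
        struct rootdom *old = NULL;

        pthread_mutex_lock(&rq_lock);
        if (*cur) {
                old = *cur;
                if (--old->refcount != 0)
                        old = NULL;     /* still referenced: skip the free */
        }
        next->refcount++;
        *cur = next;
        pthread_mutex_unlock(&rq_lock);

        if (old)
                free(old);              /* freed only after the lock is dropped */
}

int main(void)
{
        struct rootdom *a = calloc(1, sizeof(*a));
        struct rootdom *b = calloc(1, sizeof(*b));
        struct rootdom *cur = NULL;

        attach(&cur, a);        /* cur = a, a->refcount = 1 */
        attach(&cur, b);        /* last ref on a dropped: a freed outside the lock */
        free(b);
        return 0;
}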
@@ -9220,6 +9224,16 @@ static int sched_rt_global_constraints(void)
 
         return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+        /* Don't accept realtime tasks when there is no way for them to run */
+        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+                return 0;
+
+        return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
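Factoring the zero-runtime check out of the cgroup hook into the named, non-static sched_rt_can_attach() makes the same bandwidth test reusable by any path that moves a task into a task group, not only the cgroup attach path patched below. A userspace model of the helper's predicate, with placeholder types standing in for task_group and task_struct:

/* Userspace model: an RT task may only join a group whose RT bandwidth
 * gives it some runtime to consume. */
#include <stdbool.h>
#include <stdio.h>

struct group { unsigned long long rt_runtime; };
struct task  { bool rt; };

static bool can_attach(const struct group *tg, const struct task *tsk)
{
        if (tsk->rt && tg->rt_runtime == 0)
                return false;   /* no way for the RT task to ever run */
        return true;
}

int main(void)
{
        struct group empty = { .rt_runtime = 0 };
        struct task rt_task = { .rt = true };

        printf("attach allowed: %d\n", can_attach(&empty, &rt_task)); /* 0 */
        return 0;
}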
@@ -9313,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                       struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-        /* Don't accept realtime tasks when there is no way for them to run */
-        if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+        if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
                 return -EINVAL;
 #else
         /* We don't support RT-tasks being in separate groups */
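With this last hunk, cpu_cgroup_can_attach() simply delegates to the new helper; behavior is unchanged, since RT tasks are still refused with -EINVAL when the target group's RT runtime is zero, and the #else branch still rejects RT tasks in separate groups when group scheduling is off. Reusing the types from the previous sketch, the delegation amounts to translating the helper's boolean into the error code the cgroup layer expects:

/* Reuses struct group / struct task / can_attach() from the sketch
 * above; EINVAL comes from <errno.h> in this userspace model. */
#include <errno.h>

static int cgroup_can_attach_model(const struct group *tg,
                                   const struct task *tsk)
{
        if (!can_attach(tg, tsk))
                return -EINVAL; /* same error the cgroup layer returns */
        return 0;
}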