path: root/kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 30 ++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c1d0ed360088..8e2558c2ba67 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
         ktime_t now;
 
-        if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                 return;
 
         if (hrtimer_active(&rt_b->rt_period_timer))
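
Note: the hunk above tightens the guard in start_rt_bandwidth(). Previously the period timer was skipped only when bandwidth was enabled and the group's runtime was RUNTIME_INF; with the new condition it is also skipped whenever RT bandwidth is globally disabled. A standalone sketch of the new condition, with stub values standing in for the kernel's rt_bandwidth_enabled() and RUNTIME_INF:

#include <stdbool.h>

#define RUNTIME_INF (-1LL)          /* placeholder sentinel for "unlimited runtime" */

static bool bandwidth_enabled;      /* stands in for rt_bandwidth_enabled() */

/* Returns true only when the RT period timer actually needs to run. */
static bool need_period_timer(long long rt_runtime)
{
        /* Mirrors: if (!rt_bandwidth_enabled() || rt_runtime == RUNTIME_INF) return; */
        if (!bandwidth_enabled || rt_runtime == RUNTIME_INF)
                return false;

        return true;
}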
@@ -6944,20 +6944,26 @@ static void free_rootdomain(struct root_domain *rd)
 
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
+        struct root_domain *old_rd = NULL;
         unsigned long flags;
 
         spin_lock_irqsave(&rq->lock, flags);
 
         if (rq->rd) {
-                struct root_domain *old_rd = rq->rd;
+                old_rd = rq->rd;
 
                 if (cpumask_test_cpu(rq->cpu, old_rd->online))
                         set_rq_offline(rq);
 
                 cpumask_clear_cpu(rq->cpu, old_rd->span);
 
-                if (atomic_dec_and_test(&old_rd->refcount))
-                        free_rootdomain(old_rd);
+                /*
+                 * If we dont want to free the old_rt yet then
+                 * set old_rd to NULL to skip the freeing later
+                 * in this function:
+                 */
+                if (!atomic_dec_and_test(&old_rd->refcount))
+                        old_rd = NULL;
         }
 
         atomic_inc(&rd->refcount);
@@ -6968,6 +6974,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
                 set_rq_online(rq);
 
         spin_unlock_irqrestore(&rq->lock, flags);
+
+        if (old_rd)
+                free_rootdomain(old_rd);
 }
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
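
Note: the two rq_attach_root() hunks apply a common locking pattern: the old root domain must not be freed while rq->lock is held with interrupts disabled, so the pointer is stashed in old_rd, cleared again if a reference remains, and freed only after the lock is dropped. A minimal userspace sketch of the same pattern, using a pthread mutex and a plain refcount in place of the kernel's spinlock and atomic_t:

#include <pthread.h>
#include <stdlib.h>

struct refobj {
        int refcount;            /* plain int for the sketch; the kernel uses atomic_t */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void detach_and_free(struct refobj **slot, struct refobj *new_obj)
{
        struct refobj *old = NULL;

        pthread_mutex_lock(&lock);
        if (*slot) {
                old = *slot;
                /* Keep 'old' only if we just dropped the last reference. */
                if (--old->refcount != 0)
                        old = NULL;
        }
        new_obj->refcount++;
        *slot = new_obj;
        pthread_mutex_unlock(&lock);

        /* The actual free happens only after the lock is released. */
        if (old)
                free(old);
}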
@@ -9215,6 +9224,16 @@ static int sched_rt_global_constraints(void)
 
         return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+        /* Don't accept realtime tasks when there is no way for them to run */
+        if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+                return 0;
+
+        return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
@@ -9308,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                        struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-        /* Don't accept realtime tasks when there is no way for them to run */
-        if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+        if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
                 return -EINVAL;
 #else
         /* We don't support RT-tasks being in separate groups */
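
Note: the last two hunks extract the RT-runtime check from cpu_cgroup_can_attach() into sched_rt_can_attach(), so the policy lives in one named predicate that other callers can share. A standalone illustration of the resulting call shape, using stub types rather than the kernel's task_group and task_struct:

#include <errno.h>

struct group { unsigned long rt_runtime; };     /* stub for struct task_group */
struct task  { int is_realtime; };              /* stub for struct task_struct */

/* Mirrors sched_rt_can_attach(): RT tasks need a group with nonzero RT runtime. */
static int group_can_accept(const struct group *g, const struct task *t)
{
        if (t->is_realtime && g->rt_runtime == 0)
                return 0;

        return 1;
}

/* Mirrors the cpu_cgroup_can_attach() caller: a failed predicate becomes -EINVAL. */
static int can_attach(const struct group *g, const struct task *t)
{
        if (!group_can_accept(g, t))
                return -EINVAL;

        return 0;
}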