author	Kirill Tkhai <tkhai@yandex.ru>	2014-03-14 18:15:07 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-04-18 06:07:29 -0400
commit	46383648b3c769fa74794ae6425ab993fc113bdb (patch)
tree	b4d2c6f3801ecb13ef92e6ae763ee2ac6dd4a388	/kernel/sched/rt.c
parent	f4ebcbc0d7e009783256c9daf76bc4b90e645c14 (diff)
sched: Revert commit 4c6c4e38c4e9 ("sched/core: Fix endless loop in pick_next_task()")
This reverts commit 4c6c4e38c4e9 ("sched/core: Fix endless loop in pick_next_task()"), which is not necessary after ("sched/rt: Substract number of tasks of throttled queues from rq->nr_running").

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
[conflict resolution with stop task checking patch]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394835307.18748.34.camel@HP-250-G1-Notebook-PC
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2add019ddbd0..7795e292f4c9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -493,6 +493,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 	dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -569,6 +574,11 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 	dequeue_top_rt_rq(rt_rq);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
+
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_online_mask;
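
For context the hunk headers do not show: the two rt_rq_throttled() definitions that this revert moves back into kernel/sched/rt.c are the group-scheduling and non-group variants of the same helper. A minimal sketch of how they sit in the file, assuming the usual #ifdef CONFIG_RT_GROUP_SCHED / #else split that surrounds the neighbouring helpers (the conditional itself lies outside these hunks):

#ifdef CONFIG_RT_GROUP_SCHED
/* A throttled group rt_rq keeps running while it still holds boosted entities. */
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
#else
/* Without group scheduling there are no boosted entities to consider. */
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}
#endif /* CONFIG_RT_GROUP_SCHED */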