author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-08-19 06:33:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-28 07:42:38 -0400
commit		cc2991cf15ae92fa30b3ea9f56a8a5a337bd33c7 (patch)
tree		a286839539678bb2b36e7e059b50a2e575e93782 /kernel
parent		f3ade837808121ff8bab9c56725f4fe40ec85a56 (diff)
sched: rt-bandwidth accounting fix
It fixes an accounting bug where we would continue accumulating runtime even though the bandwidth control is disabled. This would lead to very long throttle periods once bandwidth control gets turned on again.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_rt.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 07d9b3307907..552310798dad 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -440,9 +440,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (runtime == RUNTIME_INF)
-		return 0;
-
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
@@ -493,9 +490,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
-		if (sched_rt_runtime_exceeded(rt_rq))
-			resched_task(curr);
+		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			rt_rq->rt_time += delta_exec;
+			if (sched_rt_runtime_exceeded(rt_rq))
+				resched_task(curr);
+		}
 		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
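
For context, the sketch below is a minimal userspace illustration, not kernel code, of the accounting problem the patch addresses: the struct fields and helpers (update_curr_rt_old/update_curr_rt_new, rt_time, rt_runtime) are simplified stand-ins for the scheduler internals touched above. With the old behaviour, rt_time keeps growing while bandwidth control is disabled (RUNTIME_INF, i.e. sched_rt_runtime_us set to -1), so the group trips the throttle as soon as control is re-enabled; the patched behaviour skips the accounting entirely while disabled.

/*
 * Hypothetical userspace sketch (not kernel code) of the accounting bug
 * fixed by this patch. All names here are simplified stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF ((uint64_t)~0ULL)	/* bandwidth control disabled */

struct rt_rq {
	uint64_t rt_time;	/* runtime consumed in the current period */
	uint64_t rt_runtime;	/* allowed runtime per period, or RUNTIME_INF */
	int      rt_throttled;
};

/* Old behaviour: rt_time grows even while bandwidth control is off. */
static void update_curr_rt_old(struct rt_rq *rq, uint64_t delta_exec)
{
	rq->rt_time += delta_exec;
	if (rq->rt_runtime != RUNTIME_INF && rq->rt_time > rq->rt_runtime)
		rq->rt_throttled = 1;
}

/* Patched behaviour: skip the accounting entirely while disabled. */
static void update_curr_rt_new(struct rt_rq *rq, uint64_t delta_exec)
{
	if (rq->rt_runtime == RUNTIME_INF)
		return;
	rq->rt_time += delta_exec;
	if (rq->rt_time > rq->rt_runtime)
		rq->rt_throttled = 1;
}

int main(void)
{
	struct rt_rq old = { 0, RUNTIME_INF, 0 };
	struct rt_rq new = { 0, RUNTIME_INF, 0 };

	/* Run with bandwidth control disabled for a long stretch. */
	for (int i = 0; i < 1000; i++) {
		update_curr_rt_old(&old, 1000000);	/* 1ms per tick */
		update_curr_rt_new(&new, 1000000);
	}

	/* Re-enable bandwidth control: 950ms of runtime per period. */
	old.rt_runtime = new.rt_runtime = 950000000ULL;
	update_curr_rt_old(&old, 1000000);
	update_curr_rt_new(&new, 1000000);

	printf("old: rt_time=%llu throttled=%d\n",
	       (unsigned long long)old.rt_time, old.rt_throttled);
	printf("new: rt_time=%llu throttled=%d\n",
	       (unsigned long long)new.rt_time, new.rt_throttled);
	return 0;
}

Running the sketch should show the old variant throttled immediately after bandwidth control is re-enabled, because of the runtime accumulated while it was disabled, while the new variant is not.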