author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-08-19 06:33:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-19 07:10:09 -0400
commit	6f0d5c390e4206dcb3804a5072a048fdb7d2b428 (patch)
tree	60e25ec2076091c1deb9d414e84e276320be44bf /kernel
parent	af4491e51632d01fbc2b856ffa9ebcd4b38db68c (diff)
sched: rt-bandwidth accounting fix
It fixes an accounting bug where we would continue accumulating runtime
even though the bandwidth control is disabled.  This would lead to very
long throttle periods once bandwidth control gets turned on again.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
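For illustration, below is a minimal user-space sketch of the arithmetic
behind that claim.  It is not kernel code: the single run queue, the
variable names rt_time_old/rt_time_fixed and the one-refill-per-period
drain loop are simplifying assumptions made for this example; only
rt_runtime, rt_period and RUNTIME_INF mirror what kernel/sched_rt.c uses.

/*
 * Simplified model of the rt-bandwidth accounting bug: accumulate
 * runtime for 60s while bandwidth control is disabled, then re-enable
 * a finite limit and count how long the backlog keeps us throttled.
 */
#include <stdio.h>
#include <stdint.h>

#define RUNTIME_INF	((uint64_t)~0ULL)

int main(void)
{
	uint64_t rt_runtime = RUNTIME_INF;		/* bandwidth control disabled */
	const uint64_t rt_period = 1000000000ULL;	/* 1s period, in ns */
	uint64_t rt_time_old = 0, rt_time_fixed = 0;
	int i, periods = 0;

	/* an RT task runs flat out for 60 seconds */
	for (i = 0; i < 60; i++) {
		/* old update_curr_rt(): accounts unconditionally */
		rt_time_old += rt_period;

		/* fixed update_curr_rt(): skips accounting while disabled */
		if (rt_runtime != RUNTIME_INF)
			rt_time_fixed += rt_period;
	}

	/* bandwidth control is turned back on: 950ms runtime per 1s period */
	rt_runtime = 950000000ULL;

	/*
	 * Rough model of the periodic timer: each period refunds at most
	 * rt_runtime worth of accumulated rt_time; while rt_time exceeds
	 * rt_runtime the group stays throttled.
	 */
	while (rt_time_old > rt_runtime) {
		rt_time_old -= rt_runtime;
		periods++;
	}

	printf("old accounting  : throttled for %d periods after re-enable\n",
	       periods);
	printf("fixed accounting: backlog %llu ns, not throttled\n",
	       (unsigned long long)rt_time_fixed);
	return 0;
}

Under these assumptions the old accounting path carries a minute's worth
of rt_time into the first period after re-enabling and the model stays
throttled for dozens of refill periods, while the fixed path carries no
backlog at all.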
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_rt.c	| 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 998ba54b4543..77340b04a538 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -438,9 +438,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (runtime == RUNTIME_INF)
-		return 0;
-
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
@@ -491,9 +488,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
-		if (sched_rt_runtime_exceeded(rt_rq))
-			resched_task(curr);
+		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			rt_rq->rt_time += delta_exec;
+			if (sched_rt_runtime_exceeded(rt_rq))
+				resched_task(curr);
+		}
 		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }