path: root/kernel/sched_rt.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-19 08:22:25 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-20 04:26:00 -0400
commit	b79f3833d81d54fc71d98c8064dc45f33a755a8a (patch)
tree	863df388344bbd6e370a6f4f1c721418b919d368 /kernel/sched_rt.c
parent	ada18de2eb76961a4d4847f63291744c9e7beec4 (diff)
sched: rt: fix SMP bandwidth balancing for throttled groups
Now we exceed the runtime and get throttled - the period rollover tick will subtract the cpu quota from the accumulated rt_time and check if we're back below quota. However, with this cpu having only a very small portion of the runtime it will not refresh as fast as it should.

Therefore, also rebalance the runtime when we're throttled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Daniel K." <dk@uw.no>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
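To see why a throttled group with a tiny local runtime share recovers so slowly, here is a small standalone sketch (not part of the patch; the numbers and the main() harness are made up for illustration) that models the per-period refresh rt_time -= min(rt_time, overrun*runtime) from do_sched_rt_period_timer(), assuming overrun == 1 on every tick:

/* Illustrative model only -- hypothetical values, not kernel code. */
#include <stdio.h>

int main(void)
{
        unsigned long long rt_time    = 50000000ULL; /* 50ms of accumulated rt_time while throttled */
        unsigned long long rt_runtime =  1000000ULL; /*  1ms of local quota left after earlier balancing */
        int periods = 0;

        /* each period-timer tick subtracts the local quota from rt_time */
        while (rt_time >= rt_runtime) {
                rt_time -= rt_runtime; /* rt_time -= min(rt_time, overrun*runtime), overrun == 1 */
                periods++;
        }

        /* with only 1ms of local runtime it takes ~50 periods to drop back under quota */
        printf("unthrottled after %d periods\n", periods);
        return 0;
}

Rebalancing while still throttled (the new balance_runtime() call in do_sched_rt_period_timer() below) pulls runtime back to this rt_rq, so the unthrottle check succeeds after far fewer periods.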
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	41
1 files changed, 29 insertions, 12 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 2e0ccdcf046a..87b2e3bf9472 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -228,6 +228,28 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif
 
+#ifdef CONFIG_SMP
+static int do_balance_runtime(struct rt_rq *rt_rq);
+
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+        int more = 0;
+
+        if (rt_rq->rt_time > rt_rq->rt_runtime) {
+                spin_unlock(&rt_rq->rt_runtime_lock);
+                more = do_balance_runtime(rt_rq);
+                spin_lock(&rt_rq->rt_runtime_lock);
+        }
+
+        return more;
+}
+#else
+static inline int balance_runtime(struct rt_rq *rt_rq)
+{
+        return 0;
+}
+#endif
+
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
         int i, idle = 1;
@@ -247,6 +269,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                         u64 runtime;
 
                         spin_lock(&rt_rq->rt_runtime_lock);
+                        if (rt_rq->rt_throttled)
+                                balance_runtime(rt_rq);
                         runtime = rt_rq->rt_runtime;
                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
@@ -267,7 +291,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 }
 
 #ifdef CONFIG_SMP
-static int balance_runtime(struct rt_rq *rt_rq)
+static int do_balance_runtime(struct rt_rq *rt_rq)
 {
         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
         struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -428,17 +452,10 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
         if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
                 return 0;
 
-#ifdef CONFIG_SMP
-        if (rt_rq->rt_time > runtime) {
-                spin_unlock(&rt_rq->rt_runtime_lock);
-                balance_runtime(rt_rq);
-                spin_lock(&rt_rq->rt_runtime_lock);
-
-                runtime = sched_rt_runtime(rt_rq);
-                if (runtime == RUNTIME_INF)
-                        return 0;
-        }
-#endif
+        balance_runtime(rt_rq);
+        runtime = sched_rt_runtime(rt_rq);
+        if (runtime == RUNTIME_INF)
+                return 0;
 
         if (rt_rq->rt_time > runtime) {
                 rt_rq->rt_throttled = 1;