author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2008-08-14 09:49:00 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-08-14 09:50:58 -0400
commit    f1679d08480008e06fd619c71635ed33274e2595 (patch)
tree      bf38250a5b7c969a6b91ef1043f0cfdd02f966d4 /kernel
parent    09f2724a786f76475ef2985cf84f5359c553aade (diff)
sched: fix rt-bandwidth hotplug race
When we hot-unplug a cpu and rebuild the sched-domain, all cpus will be
detached. Alex observed the case where a runqueue was stealing bandwidth
from an already disabled runqueue to satisfy its own needs.

Stop this by skipping over already disabled runqueues.

Reported-by: Alex Nixon <alex.nixon@citrix.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Alex Nixon <alex.nixon@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/sched_rt.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 6163e4cf885b..998ba54b4543 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -298,7 +298,7 @@ static void __disable_runtime(struct rq *rq)
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
-		if (iter == rt_rq)
+		if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 			continue;
 
 		spin_lock(&iter->rt_runtime_lock);
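
For context, a minimal userspace sketch of the borrowing loop this hunk patches. __disable_runtime() walks the sibling rt runqueues in the root domain to reclaim the runtime the dying runqueue lent out; the fix makes it skip siblings that were already disabled, which are marked by rt_runtime == RUNTIME_INF, instead of stealing from them. The names rt_rq, rt_runtime, and RUNTIME_INF follow the kernel, but the borrow_runtime() helper, the RUNTIME_INF definition, and the data layout here are simplified stand-ins for illustration, not the kernel source.

/* sketch of the __disable_runtime() borrowing loop; not the kernel source */
#include <stdio.h>
#include <stdint.h>

/* stand-in sentinel for a disabled runqueue; the kernel uses ~0ULL */
#define RUNTIME_INF INT64_MAX
#define NR_CPUS 4

struct rt_rq {
	int64_t rt_runtime;	/* runtime left on this cpu's rt runqueue */
};

static struct rt_rq rq_of[NR_CPUS];

/*
 * When a cpu is unplugged, its runqueue must settle its accounts:
 * 'want' is how much runtime it still needs to reclaim from siblings.
 * Returns what could not be reclaimed.
 */
static int64_t borrow_runtime(int self, int64_t want)
{
	for (int i = 0; i < NR_CPUS && want > 0; i++) {
		struct rt_rq *iter = &rq_of[i];

		/*
		 * The fix: skip ourselves *and* already disabled
		 * runqueues (rt_runtime == RUNTIME_INF). Before the
		 * patch, a disabled runqueue could be stolen from,
		 * corrupting the bandwidth accounting.
		 */
		if (i == self || iter->rt_runtime == RUNTIME_INF)
			continue;

		int64_t diff = iter->rt_runtime < want ? iter->rt_runtime : want;
		iter->rt_runtime -= diff;
		want -= diff;
	}
	return want;
}

int main(void)
{
	rq_of[0].rt_runtime = 950000;
	rq_of[1].rt_runtime = RUNTIME_INF;	/* cpu1 already unplugged */
	rq_of[2].rt_runtime = 950000;
	rq_of[3].rt_runtime = 950000;

	int64_t left = borrow_runtime(0, 500000);
	printf("still owed: %lld\n", (long long)left);
	for (int i = 1; i < NR_CPUS; i++)
		printf("cpu%d rt_runtime: %lld\n", i, (long long)rq_of[i].rt_runtime);
	return 0;
}

Running the sketch, cpu1's RUNTIME_INF marker is left untouched while cpu2 gives up the 500000 units; without the skip, the loop would have decremented cpu1's sentinel value as if it were real runtime.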