author     Gregory Haskins <ghaskins@novell.com>  2008-12-29 09:39:49 -0500
committer  Gregory Haskins <ghaskins@novell.com>  2008-12-29 09:39:49 -0500
commit     a8728944efe23417e38bf22063f06d9d8ee21d59
tree       225f453912a50a4baf0fdbc578c31bd23d4780f6 /kernel/sched_rt.c
parent     e864c499d9e57805ae1f9e7ea404dd223759cd53
sched: use highest_prio.curr for pull threshold
highest_prio.curr is actually a more accurate way to keep track of the
pull_rt_task() threshold since it is always up to date, even if the
"next" task migrates during double_lock.  Therefore, stop looking at
the "next" task object and simply use the highest_prio.curr.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
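For readers outside the scheduler code, here is a minimal standalone sketch of the threshold logic this commit moves to. The struct and function names (rt_rq_model, task_model, should_pull) are simplified stand-ins invented for this illustration, not the kernel's real definitions; the one faithful detail is that a lower numeric prio means higher priority, and that the comparison uses the runqueue's always-maintained highest_prio.curr rather than a cached "next" task:

	/* Simplified model of the new pull threshold: compare a pull
	 * candidate's priority against the runqueue's highest_prio.curr
	 * field, which is kept up to date on every enqueue/dequeue, so
	 * no locally cached "next" task pointer can go stale.
	 * All names here are invented for the sketch.
	 */
	#include <stdio.h>

	struct rt_rq_model {
		int highest_prio_curr;	/* stand-in for rt.highest_prio.curr */
	};

	struct task_model {
		const char *comm;
		int prio;		/* lower value = higher priority */
	};

	/* Pull p only if it would preempt whatever this runqueue
	 * would run next, per highest_prio.curr. */
	static int should_pull(const struct task_model *p,
			       const struct rt_rq_model *this_rt_rq)
	{
		return p->prio < this_rt_rq->highest_prio_curr;
	}

	int main(void)
	{
		struct rt_rq_model this_rq = { .highest_prio_curr = 50 };
		struct task_model a = { "rt-worker", 40 };	/* higher prio: pulled */
		struct task_model b = { "rt-logger", 60 };	/* lower prio: skipped */

		printf("%s: pull=%d\n", a.comm, should_pull(&a, &this_rq));
		printf("%s: pull=%d\n", b.comm, should_pull(&b, &this_rq));
		return 0;
	}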
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c  31
1 file changed, 6 insertions(+), 25 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ad36d7232236..f8fb3edadcaa 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1207,14 +1207,12 @@ static void push_rt_tasks(struct rq *rq)
 static int pull_rt_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, ret = 0, cpu;
-	struct task_struct *p, *next;
+	struct task_struct *p;
 	struct rq *src_rq;
 
 	if (likely(!rt_overloaded(this_rq)))
 		return 0;
 
-	next = pick_next_task_rt(this_rq);
-
 	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
@@ -1223,17 +1221,9 @@ static int pull_rt_task(struct rq *this_rq)
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could
-		 * steal our next task - hence we must cause
-		 * the caller to recalculate the next task
-		 * in that case:
+		 * alter this_rq
 		 */
-		if (double_lock_balance(this_rq, src_rq)) {
-			struct task_struct *old_next = next;
-
-			next = pick_next_task_rt(this_rq);
-			if (next != old_next)
-				ret = 1;
-		}
+		double_lock_balance(this_rq, src_rq);
 
 		/*
 		 * Are there still pullable RT tasks?
@@ -1247,7 +1237,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 * Do we have an RT task that preempts
 		 * the to-be-scheduled task?
 		 */
-		if (p && (!next || (p->prio < next->prio))) {
+		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
 			WARN_ON(!p->se.on_rq);
 
@@ -1257,12 +1247,9 @@ static int pull_rt_task(struct rq *this_rq)
 			 * This is just that p is wakeing up and hasn't
 			 * had a chance to schedule. We only pull
 			 * p if it is lower in priority than the
-			 * current task on the run queue or
-			 * this_rq next task is lower in prio than
-			 * the current task on that rq.
+			 * current task on the run queue
 			 */
-			if (p->prio < src_rq->curr->prio ||
-			    (next && next->prio < src_rq->curr->prio))
+			if (p->prio < src_rq->curr->prio)
 				goto skip;
 
 			ret = 1;
@@ -1275,13 +1262,7 @@ static int pull_rt_task(struct rq *this_rq)
 			 * case there's an even higher prio task
 			 * in another runqueue. (low likelyhood
 			 * but possible)
-			 *
-			 * Update next so that we won't pick a task
-			 * on another cpu with a priority lower (or equal)
-			 * than the one we just picked.
 			 */
-			next = p;
-
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
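The general pattern behind the patch: double_lock_balance() may drop and retake this_rq->lock to preserve lock ordering, so any value computed before the call (such as the old cached "next" task) can be stale afterward and must be recomputed, whereas a field maintained inside every locked mutation (highest_prio.curr) is trustworthy whenever the lock is held. A hedged, userspace-only illustration of that "maintain the invariant, don't cache a snapshot" idea, with all names (queue_model, push_prio, recompute_highest) invented for the sketch:

	/* Userspace sketch: highest_prio is recomputed inside every
	 * locked mutation, so any reader holding the lock sees a fresh
	 * threshold even if the lock was dropped and retaken earlier.
	 * This is an analogy, not the kernel code.
	 */
	#include <limits.h>
	#include <pthread.h>
	#include <stdio.h>

	struct queue_model {
		pthread_mutex_t lock;
		int prios[16];
		int nr;
		int highest_prio;	/* lowest value in prios[], or INT_MAX */
	};

	/* Caller must hold q->lock. */
	static void recompute_highest(struct queue_model *q)
	{
		int i, best = INT_MAX;

		for (i = 0; i < q->nr; i++)
			if (q->prios[i] < best)
				best = q->prios[i];
		q->highest_prio = best;
	}

	static void push_prio(struct queue_model *q, int prio)
	{
		pthread_mutex_lock(&q->lock);
		q->prios[q->nr++] = prio;
		recompute_highest(q);	/* invariant kept on every mutation */
		pthread_mutex_unlock(&q->lock);
	}

	int main(void)
	{
		struct queue_model q = {
			.lock = PTHREAD_MUTEX_INITIALIZER,
			.highest_prio = INT_MAX,
		};

		push_prio(&q, 60);
		push_prio(&q, 40);

		pthread_mutex_lock(&q.lock);
		/* Safe threshold read: no stale cached "next" involved. */
		printf("pull threshold: %d\n", q.highest_prio);
		pthread_mutex_unlock(&q.lock);
		return 0;
	}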