diff options
author | Gregory Haskins <ghaskins@novell.com> | 2008-12-29 09:39:52 -0500 |
---|---|---|
committer | Gregory Haskins <ghaskins@novell.com> | 2008-12-29 09:39:52 -0500 |
commit | 967fc04671feea4dbf780c9e55a0bc8fcf68a14e (patch) | |
tree | 223f2bb8c59138cc70fbb0e438ae27819ebe1a92 /kernel/sched_rt.c | |
parent | 8f45e2b516201d1bf681e6026fa5276385def565 (diff) |
sched: add sched_class->needs_post_schedule() member
We currently run class->post_schedule() outside of the rq->lock, which
means that we need to test for the need to post_schedule outside of
the lock to avoid a forced reacquisition. This is currently not a problem
as we only look at rq->rt.overloaded. However, we want to enhance this
going forward to look at more state to reduce the need to post_schedule to
a bare minimum set. Therefore, we introduce a new member-func called
needs_post_schedule() which tests for the post_schedule condition without
actually performing the work. Therefore it is safe to call this
function before the rq->lock is released, because we are guaranteed not
to drop the lock at an intermediate point (such as what post_schedule()
may do).
We will use this later in the series.
[ rostedt: removed paranoid BUG_ON ]
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r-- | kernel/sched_rt.c | 24 |
1 files changed, 14 insertions, 10 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 8d33843cb2c4..b0b6ea4ed674 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -1290,20 +1290,23 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) | |||
1290 | pull_rt_task(rq); | 1290 | pull_rt_task(rq); |
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | /* | ||
1294 | * assumes rq->lock is held | ||
1295 | */ | ||
1296 | static int needs_post_schedule_rt(struct rq *rq) | ||
1297 | { | ||
1298 | return rq->rt.overloaded ? 1 : 0; | ||
1299 | } | ||
1300 | |||
1293 | static void post_schedule_rt(struct rq *rq) | 1301 | static void post_schedule_rt(struct rq *rq) |
1294 | { | 1302 | { |
1295 | /* | 1303 | /* |
1296 | * If we have more than one rt_task queued, then | 1304 | * This is only called if needs_post_schedule_rt() indicates that |
1297 | * see if we can push the other rt_tasks off to other CPUS. | 1305 | * we need to push tasks away |
1298 | * Note we may release the rq lock, and since | ||
1299 | * the lock was owned by prev, we need to release it | ||
1300 | * first via finish_lock_switch and then reaquire it here. | ||
1301 | */ | 1306 | */ |
1302 | if (unlikely(rq->rt.overloaded)) { | 1307 | spin_lock_irq(&rq->lock); |
1303 | spin_lock_irq(&rq->lock); | 1308 | push_rt_tasks(rq); |
1304 | push_rt_tasks(rq); | 1309 | spin_unlock_irq(&rq->lock); |
1305 | spin_unlock_irq(&rq->lock); | ||
1306 | } | ||
1307 | } | 1310 | } |
1308 | 1311 | ||
1309 | /* | 1312 | /* |
@@ -1557,6 +1560,7 @@ static const struct sched_class rt_sched_class = { | |||
1557 | .rq_online = rq_online_rt, | 1560 | .rq_online = rq_online_rt, |
1558 | .rq_offline = rq_offline_rt, | 1561 | .rq_offline = rq_offline_rt, |
1559 | .pre_schedule = pre_schedule_rt, | 1562 | .pre_schedule = pre_schedule_rt, |
1563 | .needs_post_schedule = needs_post_schedule_rt, | ||
1560 | .post_schedule = post_schedule_rt, | 1564 | .post_schedule = post_schedule_rt, |
1561 | .task_wake_up = task_wake_up_rt, | 1565 | .task_wake_up = task_wake_up_rt, |
1562 | .switched_from = switched_from_rt, | 1566 | .switched_from = switched_from_rt, |