author     Gregory Haskins <ghaskins@novell.com>    2009-07-29 11:08:47 -0400
committer  Ingo Molnar <mingo@elte.hu>              2009-08-02 08:26:10 -0400
commit     3f029d3c6d62068d59301d90c18dbde8ee402107 (patch)
tree       c9c4e49bc0c8b919a81bf428180b9cffedbef389 /kernel/sched_rt.c
parent     c3a2ae3d93c0f10d29c071f599764d00b8de00cb (diff)
sched: Enhance the pre/post scheduling logic
We currently have an explicit "needs_post" vtable method which
returns, via a stack variable, whether we should later run
post-schedule. This leads to an awkward exchange of the variable
as it bubbles back up out of the context switch. Peter Zijlstra
observed that this information could be stored in the run-queue
itself instead of being handled on the stack.
Therefore, we revert to the method of having context_switch
return void, and update an internal rq->post_schedule variable
when we require further processing.
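For reference, the run-queue side of that change lives in kernel/sched.c, which this diffstat (limited to kernel/sched_rt.c) does not show. A minimal sketch of the new field follows; the real struct rq has many more members, and the exact placement here is illustrative:

/*
 * Illustrative sketch only: struct rq in kernel/sched.c carries many
 * more fields. This shows just the flag this patch introduces.
 */
struct rq {
	/* ... existing fields ... */
#ifdef CONFIG_SMP
	int post_schedule;	/* a sched class wants post_schedule() run */
#endif
	/* ... */
};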
In addition, we fix a race where we previously accessed
current->sched_class without holding the rq->lock; the sched
class could change out from under us. Instead, we now reference
the per-rq post_schedule variable with the runqueue unlocked,
but with preemption disabled, to see whether we need to
reacquire the rq->lock.
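A rough sketch of the pattern described above, on the scheduler-core side (again kernel/sched.c, outside this diffstat, so treat the exact shape as illustrative): the flag is read locklessly with preemption disabled, and the rq->lock is taken only when there is actually work to do.

/*
 * Sketch of the core-side consumer. Called after the context switch
 * with preemption disabled but rq->lock NOT held: reading the per-rq
 * flag without the lock is safe here, because it is only a hint and
 * the class hook revalidates under the lock.
 */
static inline void post_schedule(struct rq *rq)
{
	if (rq->post_schedule) {
		unsigned long flags;

		spin_lock_irqsave(&rq->lock, flags);
		if (rq->curr->sched_class->post_schedule)
			rq->curr->sched_class->post_schedule(rq);
		spin_unlock_irqrestore(&rq->lock, flags);

		rq->post_schedule = 0;
	}
}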
Finally, we clean the code up slightly by removing the #ifdef
CONFIG_SMP conditionals from the schedule() path and implementing
some inline helper functions instead.
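Those helpers follow the usual kernel pattern: real work under CONFIG_SMP, empty inline stubs otherwise, so schedule() can call them unconditionally. A sketch of the shape (the pre_schedule() half shown here is an assumption based on the existing pre_schedule vtable hook; the SMP post_schedule() half is sketched above):

#ifdef CONFIG_SMP
/* Called with rq->lock held. */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
	if (prev->sched_class->pre_schedule)
		prev->sched_class->pre_schedule(rq, prev);
}
#else
/* On UP there are no other runqueues to push to or pull from. */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
}

static inline void post_schedule(struct rq *rq)
{
}
#endif

With these in place, schedule() simply calls pre_schedule() before and post_schedule() after the context switch, with no #ifdefs at the call sites.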
This patch passes checkpatch and rt-migrate.
Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090729150422.17691.55590.stgit@dev.haskins.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 31 +++++++++++--------------------
 1 file changed, 11 insertions(+), 20 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3918e01994e0..a8f89bc3e5eb 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1056,6 +1056,11 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 	return p;
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 static struct task_struct *pick_next_task_rt(struct rq *rq)
 {
 	struct task_struct *p = _pick_next_task_rt(rq);
@@ -1064,6 +1069,12 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+
 	return p;
 }
 
@@ -1262,11 +1273,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-	return !plist_head_empty(&rq->rt.pushable_tasks);
-}
-
 static struct task_struct *pick_next_pushable_task(struct rq *rq)
 {
 	struct task_struct *p;
@@ -1466,23 +1472,9 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 }
 
-/*
- * assumes rq->lock is held
- */
-static int needs_post_schedule_rt(struct rq *rq)
-{
-	return has_pushable_tasks(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
-	/*
-	 * This is only called if needs_post_schedule_rt() indicates that
-	 * we need to push tasks away
-	 */
-	spin_lock_irq(&rq->lock);
 	push_rt_tasks(rq);
-	spin_unlock_irq(&rq->lock);
 }
 
 /*
@@ -1758,7 +1750,6 @@ static const struct sched_class rt_sched_class = {
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
-	.needs_post_schedule	= needs_post_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,
 	.switched_from		= switched_from_rt,