author	Jupyung Lee <jupyung@gmail.com>	2009-11-17 04:51:40 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-09 04:03:06 -0500
commit	a65ac745e47e91f9d98dbf07f22ed0492e34d998 (patch)
tree	246b0953cbf4e2d890d71d01cbfb070f8e4b1811 /kernel
parent	cd29fe6f2637cc2ccbda5ac65f5332d6bf5fa3c6 (diff)
sched: Move update_curr() in check_preempt_wakeup() to avoid redundant call
If an RT task is woken up while a non-RT task is running, check_preempt_wakeup() is called to check whether the new task can preempt the old task. The function returns quickly without going deeper because it is apparent that an RT task can always preempt a non-RT task.

In this situation, check_preempt_wakeup() always calls update_curr() to update the vruntime of the currently running task. However, that call is unnecessary and redundant at that moment because (1) a non-RT task can always be preempted by an RT task regardless of its vruntime value, and (2) update_curr() will be called shortly anyway when the context switch between the two occurs.

By moving the update_curr() call later in check_preempt_wakeup(), we avoid this redundant call, slightly reducing the time taken to wake up RT tasks.

Signed-off-by: Jupyung Lee <jupyung@gmail.com>
[ Place update_curr() right before the wakeup_preempt_entity() call, which
  is the only thing that relies on the updated vruntime ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1258451500-6714-1-git-send-email-jupyung@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
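For readers skimming the patch, the following is a heavily abridged sketch of the control flow of check_preempt_wakeup() after this change. It is not the actual kernel/sched_fair.c code (the batch/idle policy checks, buddy handling and wakeup-granularity logic are omitted); it only illustrates where update_curr() now sits relative to the RT fast path and the wakeup_preempt_entity() comparison:

/* Abridged sketch only; the real function carries much more logic. */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);

	/* An RT waker always preempts a CFS task; no vruntime needed here. */
	if (unlikely(rt_prio(p->prio))) {
		resched_task(curr);
		return;
	}

	/* ... policy checks, buddy handling, etc. omitted ... */

	/*
	 * curr's vruntime is only needed for the comparison below, so
	 * update_curr() is now called right before wakeup_preempt_entity().
	 */
	update_curr(cfs_rq);

	if (wakeup_preempt_entity(se, pse) == 1)
		resched_task(curr);
}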
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 44ec80ccfa85..4dec18579c9a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1651,8 +1651,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	int sync = wake_flags & WF_SYNC;
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	update_curr(cfs_rq);
-
 	if (unlikely(rt_prio(p->prio))) {
 		resched_task(curr);
 		return;
@@ -1710,6 +1708,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 	BUG_ON(!pse);
 
+	update_curr(cfs_rq);
+
 	if (wakeup_preempt_entity(se, pse) == 1) {
 		resched_task(curr);
 		/*