author		Peter Zijlstra <peterz@infradead.org>	2014-01-23 14:32:21 -0500
committer	Ingo Molnar <mingo@kernel.org>	2014-02-11 03:58:10 -0500
commit		38033c37faab850ed5d33bb675c4de6c66be84d8 (patch)
tree		7a00530a9a1346f29f8899ff949bf07a9e7db7ee /kernel/sched/fair.c
parent		6c3b4d44ba2838f00614a5a2d777d4401e0bfd71 (diff)
sched: Push down pre_schedule() and idle_balance()
This patch merges idle_balance() and pre_schedule() and pushes
both of them into pick_next_task().
Conceptually pre_schedule() and idle_balance() are rather similar;
both are used to pull more work onto the current CPU.
We cannot, however, simply move idle_balance() into pre_schedule_fair(),
since there is no guarantee the last runnable task is a fair task, and
thus we would miss newidle balances.
Similarly, the dl and rt pre_schedule calls must be run before
idle_balance(), since their respective tasks have higher priority and
it would not do to delay their execution by searching for less
important tasks first.
However, by noticing that pick_next_task() already traverses the
sched_class hierarchy in the right order, we can get the right
behaviour and do away with both calls.
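
For reference, the traversal in question is the for_each_class() loop in
the core scheduler's pick_next_task(). The snippet below is a simplified
sketch of that loop as it looks around this series, paraphrased rather
than the verbatim kernel/sched/core.c code:

static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev)
{
	const struct sched_class *class;
	struct task_struct *p;

	/* Classes are walked in priority order: stop, dl, rt, fair, idle. */
	for_each_class(class) {
		/*
		 * Each class's ->pick_next_task() may now pull work for
		 * itself (the old pre_schedule()/idle_balance() duty)
		 * before deciding whether it has a task to run.
		 */
		p = class->pick_next_task(rq, prev);
		if (p)
			return p;
	}

	BUG(); /* the idle class will always have a runnable task */
}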
We must, however, change the special-case optimization to also require
that prev is of the fair sched_class, otherwise we can miss doing a dl
or rt pull where we needed one.
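
The special case referred to here is the fast path at the top of the core
pick_next_task(); the actual hunk lives in kernel/sched/core.c and is
outside this fair.c-only diffstat. A sketch of the adjusted guard, as it
would sit above the for_each_class() loop in the sketch further up
(again a paraphrase, not the exact code):

	/*
	 * Optimization: if prev is itself a fair task and every runnable
	 * task is in the fair class, call pick_next_task_fair() directly.
	 * Requiring prev to be fair is the new part: if prev were dl/rt,
	 * skipping the class walk could skip a needed dl or rt pull.
	 */
	if (likely(prev->sched_class == &fair_sched_class &&
		   rq->nr_running == rq->cfs.h_nr_running)) {
		p = fair_sched_class.pick_next_task(rq, prev);
		if (likely(p))
			return p;
	}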
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/n/tip-a8k6vvaebtn64nie345kx1je@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	26
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a81b241ff70f..43b49fe077ab 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2577,7 +2577,8 @@ void idle_exit_fair(struct rq *this_rq)
 	update_rq_runnable_avg(this_rq, 0);
 }
 
-#else
+#else /* CONFIG_SMP */
+
 static inline void update_entity_load_avg(struct sched_entity *se,
 					  int update_cfs_rq) {}
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
@@ -2589,7 +2590,7 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
 					   int sleep) {}
 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
 					      int force_update) {}
-#endif
+#endif /* CONFIG_SMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -4682,9 +4683,10 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 	struct sched_entity *se;
 	struct task_struct *p;
 
+again: __maybe_unused
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (!cfs_rq->nr_running)
-		return NULL;
+		goto idle;
 
 	if (!prev || prev->sched_class != &fair_sched_class)
 		goto simple;
@@ -4760,7 +4762,7 @@ simple:
 #endif
 
 	if (!cfs_rq->nr_running)
-		return NULL;
+		goto idle;
 
 	if (prev)
 		prev->sched_class->put_prev_task(rq, prev);
@@ -4777,6 +4779,22 @@ simple:
 	hrtick_start_fair(rq, p);
 
 	return p;
+
+idle:
+#ifdef CONFIG_SMP
+	idle_enter_fair(rq);
+	/*
+	 * We must set idle_stamp _before_ calling idle_balance(), such that we
+	 * measure the duration of idle_balance() as idle time.
+	 */
+	rq->idle_stamp = rq_clock(rq);
+	if (idle_balance(rq)) { /* drops rq->lock */
+		rq->idle_stamp = 0;
+		goto again;
+	}
+#endif
+
+	return NULL;
 }
 
 /*