| author | Peter Zijlstra <peterz@infradead.org> | 2012-02-11 00:05:00 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-02-10 10:17:13 -0500 |
| commit | 606dba2e289446600a0b68422ed2019af5355c12 (patch) | |
| tree | 286562f6ddcca34f3f029b68507b820f0c81b707 /kernel/sched/core.c | |
| parent | fed14d45f945042a15b09de48d7d3d58d9455fc4 (diff) | |
sched: Push put_prev_task() into pick_next_task()
In order to avoid having to do put/set on a whole cgroup hierarchy
when we context switch, push the put into pick_next_task() so that
both operations are in the same function. Further changes then allow
us to possibly optimize away redundant work.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1328936700.2476.17.camel@laptop
Signed-off-by: Ingo Molnar <mingo@kernel.org>
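The per-class side of this convention lives outside kernel/sched/core.c and is therefore not part of the diff below. As a rough sketch only (the helper name and selection logic here are invented for illustration), a class's ->pick_next_task() is now expected to put prev itself rather than relying on the core having done it already:

```c
/*
 * Illustrative sketch, not taken from this commit: under the new
 * convention the class receives prev and puts it itself, so put and
 * pick happen in one function and later patches can avoid redundant
 * put/set work on a cgroup hierarchy shared by prev and next.
 */
static struct task_struct *
pick_next_task_example(struct rq *rq, struct task_struct *prev)
{
	struct task_struct *next;

	/* Hypothetical class-specific selection. */
	next = example_select_next(rq);
	if (!next)
		return NULL;

	/*
	 * prev may be NULL (migrate_tasks() below calls
	 * pick_next_task(rq, NULL)), so never dereference it blindly.
	 */
	if (prev)
		prev->sched_class->put_prev_task(rq, prev);

	return next;
}
```

The core pick_next_task() (first two hunks below) merely forwards prev to whichever class ends up picking, so each class sees the task being switched away from at the moment it chooses its successor.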
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r-- | kernel/sched/core.c | 21 |
1 file changed, 8 insertions, 13 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 417cf657a606..dedb5f07666e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2579,18 +2579,11 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
-{
-	if (prev->on_rq || rq->skip_clock_update < 0)
-		update_rq_clock(rq);
-	prev->sched_class->put_prev_task(rq, prev);
-}
-
 /*
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq)
+pick_next_task(struct rq *rq, struct task_struct *prev)
 {
 	const struct sched_class *class;
 	struct task_struct *p;
@@ -2600,13 +2593,13 @@ pick_next_task(struct rq *rq)
 	 * the fair class we can call that function directly:
 	 */
 	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq);
+		p = fair_sched_class.pick_next_task(rq, prev);
 		if (likely(p))
 			return p;
 	}
 
 	for_each_class(class) {
-		p = class->pick_next_task(rq);
+		p = class->pick_next_task(rq, prev);
 		if (p)
 			return p;
 	}
@@ -2714,8 +2707,10 @@ need_resched:
 		rq->idle_stamp = 0;
 	}
 
-	put_prev_task(rq, prev);
-	next = pick_next_task(rq);
+	if (prev->on_rq || rq->skip_clock_update < 0)
+		update_rq_clock(rq);
+
+	next = pick_next_task(rq, prev);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->skip_clock_update = 0;
@@ -4748,7 +4743,7 @@ static void migrate_tasks(unsigned int dead_cpu)
 		if (rq->nr_running == 1)
 			break;
 
-		next = pick_next_task(rq);
+		next = pick_next_task(rq, NULL);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
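The migrate_tasks() hunk is the one caller with nothing to hand over: on the hotplug path there is no previous task being switched away from, so NULL is passed and the freshly picked task is put straight back before it is migrated. Reconstructed loosely from the hunk above (surrounding locking and migration details elided), the loop now reads roughly:

```c
/* Sketch of the migrate_tasks() loop after this change; details elided. */
for ( ; ; ) {
	/* Bail once only the current thread is left on the runqueue. */
	if (rq->nr_running == 1)
		break;

	/* No prev to put on this path, hence NULL. */
	next = pick_next_task(rq, NULL);
	BUG_ON(!next);

	/* Undo the "set" implied by picking, before moving the task away. */
	next->sched_class->put_prev_task(rq, next);

	/* ... choose a destination CPU and migrate 'next' there ... */
}
```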