author      Peter Zijlstra <peterz@infradead.org>    2012-02-11 00:05:00 -0500
committer   Ingo Molnar <mingo@kernel.org>           2014-02-10 10:17:13 -0500
commit      606dba2e289446600a0b68422ed2019af5355c12
tree        286562f6ddcca34f3f029b68507b820f0c81b707 /kernel
parent      fed14d45f945042a15b09de48d7d3d58d9455fc4
sched: Push put_prev_task() into pick_next_task()
In order to avoid having to do put/set on a whole cgroup hierarchy
when we context switch, push the put into pick_next_task() so that
both operations are in the same function. Further changes then allow
us to possibly optimize away redundant work.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1328936700.2476.17.camel@laptop
Signed-off-by: Ingo Molnar <mingo@kernel.org>
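
To illustrate the new contract (this sketch is not part of the commit): after this change each class's pick_next_task() is handed @prev and is expected to put it itself, but only once it knows it will actually return a task. In the sketch below, pick_next_task_foo(), foo_nr_running() and foo_pick() are hypothetical placeholders; only the @prev handling mirrors the real implementations in the hunks that follow.

        /*
         * Sketch only: a class bails out before touching @prev when it has
         * nothing to run, so a lower class in the iteration does the put.
         */
        static struct task_struct *
        pick_next_task_foo(struct rq *rq, struct task_struct *prev)
        {
                if (!foo_nr_running(rq))        /* hypothetical helper */
                        return NULL;            /* @prev has not been put yet */

                if (prev)                       /* NULL when called from migrate_tasks() */
                        prev->sched_class->put_prev_task(rq, prev);

                return foo_pick(rq);            /* hypothetical helper */
        }

With this in place, __schedule() no longer calls put_prev_task() itself; it only updates the rq clock and passes @prev down, as the kernel/sched/core.c hunks below show.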
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c      | 21
-rw-r--r--  kernel/sched/deadline.c  |  5
-rw-r--r--  kernel/sched/fair.c      |  6
-rw-r--r--  kernel/sched/idle_task.c |  6
-rw-r--r--  kernel/sched/rt.c        | 27
-rw-r--r--  kernel/sched/sched.h     |  8
-rw-r--r--  kernel/sched/stop_task.c | 16
7 files changed, 55 insertions(+), 34 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 417cf657a606..dedb5f07666e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2579,18 +2579,11 @@ static inline void schedule_debug(struct task_struct *prev)
 	schedstat_inc(this_rq(), sched_count);
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
-{
-	if (prev->on_rq || rq->skip_clock_update < 0)
-		update_rq_clock(rq);
-	prev->sched_class->put_prev_task(rq, prev);
-}
-
 /*
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq)
+pick_next_task(struct rq *rq, struct task_struct *prev)
 {
 	const struct sched_class *class;
 	struct task_struct *p;
@@ -2600,13 +2593,13 @@ pick_next_task(struct rq *rq)
 	 * the fair class we can call that function directly:
 	 */
 	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
-		p = fair_sched_class.pick_next_task(rq);
+		p = fair_sched_class.pick_next_task(rq, prev);
 		if (likely(p))
 			return p;
 	}
 
 	for_each_class(class) {
-		p = class->pick_next_task(rq);
+		p = class->pick_next_task(rq, prev);
 		if (p)
 			return p;
 	}
@@ -2714,8 +2707,10 @@ need_resched:
 		rq->idle_stamp = 0;
 	}
 
-	put_prev_task(rq, prev);
-	next = pick_next_task(rq);
+	if (prev->on_rq || rq->skip_clock_update < 0)
+		update_rq_clock(rq);
+
+	next = pick_next_task(rq, prev);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->skip_clock_update = 0;
@@ -4748,7 +4743,7 @@ static void migrate_tasks(unsigned int dead_cpu)
 		if (rq->nr_running == 1)
 			break;
 
-		next = pick_next_task(rq);
+		next = pick_next_task(rq, NULL);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b5700bceee55..50797d576080 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -990,7 +990,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
 	return rb_entry(left, struct sched_dl_entity, rb_node);
 }
 
-struct task_struct *pick_next_task_dl(struct rq *rq)
+struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 {
 	struct sched_dl_entity *dl_se;
 	struct task_struct *p;
@@ -1001,6 +1001,9 @@ struct task_struct *pick_next_task_dl(struct rq *rq)
 	if (unlikely(!dl_rq->dl_nr_running))
 		return NULL;
 
+	if (prev)
+		prev->sched_class->put_prev_task(rq, prev);
+
 	dl_se = pick_next_dl_entity(rq, dl_rq);
 	BUG_ON(!dl_se);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 748a7ac3388f..c4bb0ac26a7c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4655,7 +4655,8 @@ preempt:
 		set_last_buddy(se);
 }
 
-static struct task_struct *pick_next_task_fair(struct rq *rq)
+static struct task_struct *
+pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 {
 	struct task_struct *p;
 	struct cfs_rq *cfs_rq = &rq->cfs;
@@ -4664,6 +4665,9 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 	if (!cfs_rq->nr_running)
 		return NULL;
 
+	if (prev)
+		prev->sched_class->put_prev_task(rq, prev);
+
 	do {
 		se = pick_next_entity(cfs_rq);
 		set_next_entity(cfs_rq, se);
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 516c3d9ceea1..e5c922ac40ce 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -33,8 +33,12 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 	resched_task(rq->idle);
 }
 
-static struct task_struct *pick_next_task_idle(struct rq *rq)
+static struct task_struct *
+pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 {
+	if (prev)
+		prev->sched_class->put_prev_task(rq, prev);
+
 	schedstat_inc(rq, sched_goidle);
 #ifdef CONFIG_SMP
 	/* Trigger the post schedule to do an idle_enter for CFS */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a2740b775b45..a15ca1c0c7bf 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1310,15 +1310,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
 	struct sched_rt_entity *rt_se;
 	struct task_struct *p;
-	struct rt_rq *rt_rq;
-
-	rt_rq = &rq->rt;
-
-	if (!rt_rq->rt_nr_running)
-		return NULL;
-
-	if (rt_rq_throttled(rt_rq))
-		return NULL;
+	struct rt_rq *rt_rq = &rq->rt;
 
 	do {
 		rt_se = pick_next_rt_entity(rq, rt_rq);
@@ -1332,9 +1324,22 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 	return p;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *
+pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 {
-	struct task_struct *p = _pick_next_task_rt(rq);
+	struct task_struct *p;
+	struct rt_rq *rt_rq = &rq->rt;
+
+	if (!rt_rq->rt_nr_running)
+		return NULL;
+
+	if (rt_rq_throttled(rt_rq))
+		return NULL;
+
+	if (prev)
+		prev->sched_class->put_prev_task(rq, prev);
+
+	p = _pick_next_task_rt(rq);
 
 	/* The running task is never eligible for pushing */
 	if (p)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bb89991ee409..c534cf4181ab 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1105,7 +1105,13 @@ struct sched_class {
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
-	struct task_struct * (*pick_next_task) (struct rq *rq);
+	/*
+	 * It is the responsibility of the pick_next_task() method that will
+	 * return the next task to call put_prev_task() on the @prev task or
+	 * something equivalent.
+	 */
+	struct task_struct * (*pick_next_task) (struct rq *rq,
+						struct task_struct *prev);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index fdb6bb0b3356..a4147c9d2017 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -23,16 +23,20 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 	/* we're never preempted */
 }
 
-static struct task_struct *pick_next_task_stop(struct rq *rq)
+static struct task_struct *
+pick_next_task_stop(struct rq *rq, struct task_struct *prev)
 {
 	struct task_struct *stop = rq->stop;
 
-	if (stop && stop->on_rq) {
-		stop->se.exec_start = rq_clock_task(rq);
-		return stop;
-	}
+	if (!stop || !stop->on_rq)
+		return NULL;
 
-	return NULL;
+	if (prev)
+		prev->sched_class->put_prev_task(rq, prev);
+
+	stop->se.exec_start = rq_clock_task(rq);
+
+	return stop;
 }
 
 static void