author:    Ben Segall <bsegall@google.com>, 2012-10-04 06:51:20 -0400
committer: Ingo Molnar <mingo@kernel.org>, 2012-10-24 04:27:20 -0400
commit:    18bf2805d9b30cb823d4919b42cd230f59c7ce1f (patch)
tree:      a3a820f6dc5f79ff7569f8fe0d60904e9657fb5a /kernel/sched/fair.c
parent:    9d85f21c94f7f7a84d0ba686c58aa6d9da58fdbb (diff)
sched: Maintain per-rq runnable averages
Since runqueues do not have a corresponding sched_entity, we instead embed a
sched_avg structure directly.
Signed-off-by: Ben Segall <bsegall@google.com>
Reviewed-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141506.442637130@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
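
Per-entity load tracking keeps a sched_avg per sched_entity and updates it via
__update_entity_runnable_avg(). A runqueue has no sched_entity of its own, so this
patch embeds a sched_avg in struct rq and updates it directly. A rough sketch of the
shape of the change (the struct rq member is added in kernel/sched/sched.h, outside
this fair.c diffstat, so its exact layout is assumed here, not shown by the patch):

	/*
	 * Sketch only: the real struct rq lives in kernel/sched/sched.h and
	 * carries many more fields; the point is that the rq gets its own
	 * embedded sched_avg rather than going through a sched_entity.
	 */
	struct rq {
		/* ... existing runqueue state ... */
		u64			clock_task;	/* timebase handed to the averaging code */
		struct sched_avg	avg;		/* per-rq runnable average */
	};

	/* fair.c then reuses the per-entity update path for the rq: */
	static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
	{
		__update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
	}

The remaining hunks wire up the call sites: enqueue_task_fair() passes rq->nr_running,
while dequeue_task_fair(), idle_balance() and task_tick_fair() pass 1.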
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--   kernel/sched/fair.c | 18
1 file changed, 16 insertions, 2 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 16d67f9b6955..8c5468fcf10d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1087,8 +1087,14 @@ static inline void update_entity_load_avg(struct sched_entity *se)
 	__update_entity_runnable_avg(rq_of(cfs_rq_of(se))->clock_task, &se->avg,
 				     se->on_rq);
 }
+
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
+{
+	__update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
+}
 #else
 static inline void update_entity_load_avg(struct sched_entity *se) {}
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
 #endif
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -2340,8 +2346,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_cfs_shares(cfs_rq);
 	}
 
-	if (!se)
+	if (!se) {
+		update_rq_runnable_avg(rq, rq->nr_running);
 		inc_nr_running(rq);
+	}
 	hrtick_update(rq);
 }
 
@@ -2399,8 +2407,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		update_cfs_shares(cfs_rq);
 	}
 
-	if (!se)
+	if (!se) {
 		dec_nr_running(rq);
+		update_rq_runnable_avg(rq, 1);
+	}
 	hrtick_update(rq);
 }
 
@@ -4586,6 +4596,8 @@ void idle_balance(int this_cpu, struct rq *this_rq)
 	if (this_rq->avg_idle < sysctl_sched_migration_cost)
 		return;
 
+	update_rq_runnable_avg(this_rq, 1);
+
 	/*
 	 * Drop the rq->lock, but keep IRQ/preempt disabled.
 	 */
@@ -5083,6 +5095,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 		cfs_rq = cfs_rq_of(se);
 		entity_tick(cfs_rq, se, queued);
 	}
+
+	update_rq_runnable_avg(rq, 1);
 }
 
 /*