author		Alex Shi <alex.shi@intel.com>	2013-06-19 22:18:51 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-06-27 04:07:36 -0400
commit		a003a25b227d59ded9197ced109517f037d01c27 (patch)
tree		879a7f31942addac4420006a5e7c871c36cc92b4 /kernel/sched/fair.c
parent		b92486cbf2aa230d00f160664858495c81d2b37b (diff)
sched: Consider runnable load average in move_tasks()
Aside from using the runnable load average in the background, move_tasks()
is also the key function in load balancing. We need to consider the
runnable load average in it as well, in order to make the load comparison
apples-to-apples.

Morten caught a div u64 bug on ARM, thanks!

Thanks-to: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Alex Shi <alex.shi@intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-8-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
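The rewritten task_h_load() reduces to a single scaling step: the task's
runnable-average contribution, scaled by its group's h_load over the group
runqueue's runnable load average. A minimal standalone model of that
arithmetic follows; task_h_load_model() and all numbers are illustrative
assumptions, not kernel code.

#include <stdio.h>

/*
 * Model of the scaling the patched task_h_load() performs.  The "+ 1"
 * on the divisor avoids dividing by zero when the runqueue's runnable
 * load average is zero.
 */
static unsigned long long
task_h_load_model(unsigned long long load_avg_contrib,
		  unsigned long long h_load,
		  unsigned long long runnable_load_avg)
{
	return load_avg_contrib * h_load / (runnable_load_avg + 1);
}

int main(void)
{
	/* Hypothetical figures: a task contributing 512 on a group
	 * runqueue whose runnable average is 2048, with h_load 1024. */
	printf("%llu\n", task_h_load_model(512, 1024, 2048));	/* 255 */
	return 0;
}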
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--	kernel/sched/fair.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e6d82cae4910..7948bb825985 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4179,11 +4179,14 @@ static int tg_load_down(struct task_group *tg, void *data)
 	long cpu = (long)data;
 
 	if (!tg->parent) {
-		load = cpu_rq(cpu)->load.weight;
+		load = cpu_rq(cpu)->avg.load_avg_contrib;
 	} else {
+		unsigned long tmp_rla;
+		tmp_rla = tg->parent->cfs_rq[cpu]->runnable_load_avg + 1;
+
 		load = tg->parent->cfs_rq[cpu]->h_load;
-		load *= tg->se[cpu]->load.weight;
-		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
+		load *= tg->se[cpu]->avg.load_avg_contrib;
+		load /= tmp_rla;
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
@@ -4209,12 +4212,9 @@ static void update_h_load(long cpu)
 static unsigned long task_h_load(struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-	unsigned long load;
-
-	load = p->se.load.weight;
-	load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
 
-	return load;
+	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
+			cfs_rq->runnable_load_avg + 1);
 }
 #else
 static inline void update_blocked_averages(int cpu)
@@ -4227,7 +4227,7 @@ static inline void update_h_load(long cpu)
 
 static unsigned long task_h_load(struct task_struct *p)
 {
-	return p->se.load.weight;
+	return p->se.avg.load_avg_contrib;
 }
 #endif
 
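A note on the helper the patch picks: the commit message credits Morten with
catching a div u64 bug on ARM. On 32-bit targets a bare 64-bit "/" is
typically lowered to a libgcc routine (__aeabi_uldivmod on ARM EABI) that
the kernel does not link against, and the load_avg_contrib * h_load product
can exceed 32 bits, so the 64-bit dividend has to go through an explicit
helper such as div64_ul(). A small userspace sketch of the overflow that
forces this; sketch_div64_ul() and the figures are illustrative, not the
kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for div64_ul(); outside the kernel the compiler
 * may freely call libgcc for the 64-bit division. */
static uint64_t sketch_div64_ul(uint64_t dividend, unsigned long divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint64_t contrib = 88888, h_load = 65536;	/* hypothetical */
	unsigned long runnable = 100000;

	/* contrib * h_load is ~5.8e9, already past 32 bits, so the
	 * dividend must be 64-bit even though both inputs fit in 32. */
	printf("%llu\n",
	       (unsigned long long)sketch_div64_ul(contrib * h_load,
						   runnable + 1));
	return 0;
}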