author	Vincent Guittot <vincent.guittot@linaro.org>	2014-08-26 07:06:46 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-09-19 06:35:26 -0400
commit	65fdac08c264506ff95ee1e34ae066e308c9e6e3 (patch)
tree	b24446233d1fb4e7fd7d213a5dcc103e33592850 /kernel/sched
parent	05bfb65f52cbdabe26ebb629959416a6cffb034d (diff)
sched: Fix avg_load computation
The computation of avg_load and avg_load_per_task should only take into
account the number of CFS tasks. The non-CFS tasks are already taken into
account by decreasing the CPU's capacity and they will be tracked in the
CPU's utilization (group_utilization) of the next patches.

Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: riel@redhat.com
Cc: Morten.Rasmussen@arm.com
Cc: efault@gmx.de
Cc: nicolas.pitre@linaro.org
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1409051215-16788-4-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
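
To illustrate why the divisor matters, the following is a small userspace sketch of the before/after behaviour of cpu_avg_load_per_task(). It is not kernel code: the struct, its field names and the load numbers are invented for the example; only the choice of divisor mirrors the hunks below.

/*
 * Simplified userspace illustration of the fix, not kernel code.
 * The fields mimic rq->nr_running (tasks of all classes) and
 * rq->cfs.h_nr_running (CFS tasks only); the load values are made up
 * purely to show the effect of the divisor.
 */
#include <stdio.h>

struct fake_rq {
	unsigned long nr_running;            /* CFS + RT + DL tasks on the CPU */
	unsigned long cfs_h_nr_running;      /* CFS tasks only */
	unsigned long cfs_runnable_load_avg; /* load contributed by CFS tasks */
};

/* Pre-patch behaviour: CFS load divided by the count of *all* tasks. */
static unsigned long avg_load_per_task_old(const struct fake_rq *rq)
{
	if (rq->nr_running)
		return rq->cfs_runnable_load_avg / rq->nr_running;
	return 0;
}

/* Post-patch behaviour: CFS load divided by the count of CFS tasks. */
static unsigned long avg_load_per_task_new(const struct fake_rq *rq)
{
	if (rq->cfs_h_nr_running)
		return rq->cfs_runnable_load_avg / rq->cfs_h_nr_running;
	return 0;
}

int main(void)
{
	/* 2 CFS tasks plus 2 RT tasks; only the CFS tasks contribute CFS load. */
	struct fake_rq rq = {
		.nr_running = 4,
		.cfs_h_nr_running = 2,
		.cfs_runnable_load_avg = 2048,
	};

	printf("old divisor (rq->nr_running):    %lu\n", avg_load_per_task_old(&rq)); /* 512: deflated by RT tasks */
	printf("new divisor (cfs.h_nr_running):  %lu\n", avg_load_per_task_new(&rq)); /* 1024 */
	return 0;
}

With non-CFS tasks on the runqueue, the old divisor deflates avg_load_per_task, even though those tasks are already accounted for via the reduced CPU capacity.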
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 391eaf25a2aa..eb87229ed4af 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4095,7 +4095,7 @@ static unsigned long capacity_of(int cpu)
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
+	unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
 	unsigned long load_avg = rq->cfs.runnable_load_avg;
 
 	if (nr_running)
@@ -5985,7 +5985,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		load = source_load(i, load_idx);
 
 		sgs->group_load += load;
-		sgs->sum_nr_running += rq->nr_running;
+		sgs->sum_nr_running += rq->cfs.h_nr_running;
 
 		if (rq->nr_running > 1)
 			*overload = true;