diff options
author | Peter Zijlstra <peterz@infradead.org> | 2013-08-15 13:47:56 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-09-02 02:27:36 -0400 |
commit | 38d0f7708543bcfa03d5ee55e8346f801b4a59c9 (patch) | |
tree | a5e7441ad2793c614b686a463a1d1be8c1d87a73 /kernel | |
parent | 147c5fc2bad780d8093b547f2baa204e78107faf (diff) |
sched/fair: Remove duplicate load_per_task computations
Since we already compute (but don't store) the sgs load_per_task value
in update_sg_lb_stats() we might as well store it and not re-compute
it later on.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-ym1vmljiwbzgdnnrwp9azftq@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/fair.c | 13 |
1 file changed, 2 insertions, 11 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4c6a8a5a789a..57952198b01e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -4476,7 +4476,6 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
4476 | { | 4476 | { |
4477 | unsigned long nr_running, max_nr_running, min_nr_running; | 4477 | unsigned long nr_running, max_nr_running, min_nr_running; |
4478 | unsigned long load, max_cpu_load, min_cpu_load; | 4478 | unsigned long load, max_cpu_load, min_cpu_load; |
4479 | unsigned long avg_load_per_task = 0; | ||
4480 | int i; | 4479 | int i; |
4481 | 4480 | ||
4482 | /* Tally up the load of all CPUs in the group */ | 4481 | /* Tally up the load of all CPUs in the group */ |
@@ -4531,9 +4530,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
4531 | * the hierarchy? | 4530 | * the hierarchy? |
4532 | */ | 4531 | */ |
4533 | if (sgs->sum_nr_running) | 4532 | if (sgs->sum_nr_running) |
4534 | avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; | 4533 | sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; |
4535 | 4534 | ||
4536 | if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && | 4535 | if ((max_cpu_load - min_cpu_load) >= sgs->load_per_task && |
4537 | (max_nr_running - min_nr_running) > 1) | 4536 | (max_nr_running - min_nr_running) > 1) |
4538 | sgs->group_imb = 1; | 4537 | sgs->group_imb = 1; |
4539 | 4538 | ||
@@ -4776,15 +4775,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s | |||
4776 | struct sg_lb_stats *local, *busiest; | 4775 | struct sg_lb_stats *local, *busiest; |
4777 | 4776 | ||
4778 | local = &sds->local_stat; | 4777 | local = &sds->local_stat; |
4779 | if (local->sum_nr_running) { | ||
4780 | local->load_per_task = | ||
4781 | local->sum_weighted_load / local->sum_nr_running; | ||
4782 | } | ||
4783 | |||
4784 | busiest = &sds->busiest_stat; | 4778 | busiest = &sds->busiest_stat; |
4785 | /* busiest must have some tasks */ | ||
4786 | busiest->load_per_task = | ||
4787 | busiest->sum_weighted_load / busiest->sum_nr_running; | ||
4788 | 4779 | ||
4789 | if (busiest->group_imb) { | 4780 | if (busiest->group_imb) { |
4790 | busiest->load_per_task = | 4781 | busiest->load_per_task = |