author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2010-07-09 09:15:43 -0400
committer  Ingo Molnar <mingo@elte.hu>               2010-07-17 06:05:14 -0400
commit     bbc8cb5baead9607309583b20873ab0cc8d89eaf
tree       425cfb2772ef50c1858a7d833408ad1bcc423f51 /kernel/sched_fair.c
parent     5343bdb8fd076f16edc9d113a9e35e2a1d1f4966
sched: Reduce update_group_power() calls
Currently we update cpu_power too often: update_group_power() only
updates the local group's cpu_power, yet it gets called for all groups.

Furthermore, CPU_NEWLY_IDLE invocations will result in all CPUs
calling it, even though a slow update of cpu_power is sufficient.

Therefore, move the update under 'idle != CPU_NEWLY_IDLE &&
local_group' to reduce the superfluous invocations.
Reported-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
LKML-Reference: <1278612989.1900.176.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e44a591531a1..c9ac09760953 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -2425,14 +2425,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * domains. In the newly idle case, we will allow all the cpu's
 	 * to do the newly idle load balance.
 	 */
-	if (idle != CPU_NEWLY_IDLE && local_group &&
-	    balance_cpu != this_cpu) {
-		*balance = 0;
-		return;
+	if (idle != CPU_NEWLY_IDLE && local_group) {
+		if (balance_cpu != this_cpu) {
+			*balance = 0;
+			return;
+		}
+		update_group_power(sd, this_cpu);
 	}
 
-	update_group_power(sd, this_cpu);
-
 	/* Adjust by relative CPU power of the group */
 	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
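
For readers who want to see the resulting control flow outside diff context,
here is a minimal standalone sketch in plain C. It is not kernel code: the
enum, the stub update_group_power(), the counter, and main() are invented for
illustration, and the real kernel function takes (sd, cpu). Only the branch
structure mirrors the patch.

	/*
	 * Standalone sketch of the post-patch branch structure in
	 * update_sg_lb_stats(). Compiles on its own; all names other
	 * than the branch logic are illustrative stand-ins.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE };

	static int power_updates; /* counts update_group_power() calls */

	static void update_group_power(int this_cpu)
	{
		power_updates++;
		printf("cpu%d: update_group_power()\n", this_cpu);
	}

	/*
	 * After the patch, the power update runs only for the local
	 * group on non-CPU_NEWLY_IDLE balancing, and only after the
	 * balance_cpu check has passed.
	 */
	static bool update_sg_lb_stats(enum cpu_idle_type idle, bool local_group,
				       int balance_cpu, int this_cpu, int *balance)
	{
		if (idle != CPU_NEWLY_IDLE && local_group) {
			if (balance_cpu != this_cpu) {
				*balance = 0;
				return false;	/* not this cpu's job to balance */
			}
			update_group_power(this_cpu); /* was unconditional before */
		}
		/* ... compute group load statistics ... */
		return true;
	}

	int main(void)
	{
		int balance = 1;

		/* Newly-idle balance: no cpu_power update at all now. */
		update_sg_lb_stats(CPU_NEWLY_IDLE, true, 0, 0, &balance);

		/* Regular balance on the designated balance_cpu: one update. */
		update_sg_lb_stats(CPU_NOT_IDLE, true, 0, 0, &balance);

		/* Remote (non-local) group: never updates power here. */
		update_sg_lb_stats(CPU_NOT_IDLE, false, 0, 0, &balance);

		printf("total updates: %d\n", power_updates);
		return 0;
	}

With the pre-patch placement of update_group_power() after the if block, all
three calls above would have invoked it; the sketch prints a single update,
which is the reduction the commit message describes.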