author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-09-01 04:34:36 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-09-04 04:09:54 -0400
commit     ab29230e673c646292c90c8b9d378b9562145af0 (patch)
tree       0b87a89add83d14f0cf6aae3b6322865985103a0 /kernel/sched.c
parent     a52bfd73589eaf88d9c95ad2c1de0b38a6b27972 (diff)
sched: Implement dynamic cpu_power
Recompute the cpu_power for each cpu during load-balance.
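The per-cpu contribution now starts at SCHED_LOAD_SCALE and, for domains whose CPUs share compute power (SMT siblings), is scaled by arch_smt_gain(), which by default spreads the domain's smt_gain evenly over the siblings. A stand-alone userspace sketch of that arithmetic follows; the smt_gain value of 1178 (roughly SCHED_LOAD_SCALE plus 15%) is an assumed default from the surrounding patch series, not something introduced by this change:

/*
 * Sketch of the new per-cpu power calculation; names mirror the patch,
 * the smt_gain default of 1178 is an assumption.
 */
#include <stdio.h>

#define SCHED_LOAD_SHIFT        10
#define SCHED_LOAD_SCALE        (1UL << SCHED_LOAD_SHIFT)

int main(void)
{
        unsigned long smt_gain = 1178;          /* assumed default, ~1.15 * SCHED_LOAD_SCALE */
        unsigned long weight = 2;               /* two hardware threads in the SMT domain    */
        unsigned long gain = smt_gain / weight; /* what the default arch_smt_gain() returns  */
        unsigned long power = SCHED_LOAD_SCALE;

        power *= gain;                          /* 1024 * 589                    */
        power >>= SCHED_LOAD_SHIFT;             /* -> 589, ~57% of a full core   */

        printf("per-sibling cpu_power: %lu of %lu\n", power, SCHED_LOAD_SCALE);
        return 0;
}

With two siblings, each thread ends up advertising a cpu_power of 589 rather than 1024, so the load balancer no longer treats a pair of hyperthreads as two full cores.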
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Tested-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Balbir Singh <balbir@in.ibm.com>
LKML-Reference: <20090901083826.162033479@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 38
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 55112261027b..036600fd70bb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3699,14 +3699,46 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-static void update_sched_power(struct sched_domain *sd)
+unsigned long __weak arch_smt_gain(struct sched_domain *sd, int cpu)
+{
+        unsigned long weight = cpumask_weight(sched_domain_span(sd));
+        unsigned long smt_gain = sd->smt_gain;
+
+        smt_gain /= weight;
+
+        return smt_gain;
+}
+
+static void update_cpu_power(struct sched_domain *sd, int cpu)
+{
+        unsigned long weight = cpumask_weight(sched_domain_span(sd));
+        unsigned long power = SCHED_LOAD_SCALE;
+        struct sched_group *sdg = sd->groups;
+        unsigned long old = sdg->__cpu_power;
+
+        /* here we could scale based on cpufreq */
+
+        if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+                power *= arch_smt_gain(sd, cpu);
+                power >>= SCHED_LOAD_SHIFT;
+        }
+
+        /* here we could scale based on RT time */
+
+        if (power != old) {
+                sdg->__cpu_power = power;
+                sdg->reciprocal_cpu_power = reciprocal_value(power);
+        }
+}
+
+static void update_group_power(struct sched_domain *sd, int cpu)
 {
         struct sched_domain *child = sd->child;
         struct sched_group *group, *sdg = sd->groups;
         unsigned long power = sdg->__cpu_power;
 
         if (!child) {
-                /* compute cpu power for this cpu */
+                update_cpu_power(sd, cpu);
                 return;
         }
 
@@ -3749,7 +3781,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         if (local_group) {
                 balance_cpu = group_first_cpu(group);
                 if (balance_cpu == this_cpu)
-                        update_sched_power(sd);
+                        update_group_power(sd, this_cpu);
         }
 
         /* Tally up the load of all CPUs in the group */
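Because arch_smt_gain() is declared __weak, an architecture can provide its own strong definition and the default even split over the siblings is replaced at link time. A minimal sketch of such an override follows; the flat 60%-per-sibling figure is invented for illustration and is not part of this patch:

/* hypothetical architecture override of the __weak default above */
unsigned long arch_smt_gain(struct sched_domain *sd, int cpu)
{
        /* pretend each SMT sibling delivers about 60% of a full core */
        return SCHED_LOAD_SCALE * 6 / 10;
}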