author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-09-02 07:28:02 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-15 10:51:24 -0400
commit	d6a59aa3a2b1ca8411884c833a313b33b5f76e20 (patch)
tree	9a4626d9721c79add9eec752a7ff2bd61e5b58a1 /kernel
parent	b8a543ea5a5896830a9969bacfd047f9d15940b2 (diff)
sched: Provide arch_scale_freq_power
Provide an arch specific hook for cpufreq based scaling of cpu_power.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
[ego@in.ibm.com: spotting bugs]
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
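Because arch_scale_freq_power() is declared __weak, an architecture can ship its own strong definition and the linker will pick it over the generic fallback, with no registration step needed. As a purely illustrative sketch (not part of this patch), an override might look roughly like the code below; arch_cpu_cur_freq_khz and arch_cpu_max_freq_khz are hypothetical per-cpu variables assumed to be maintained by the architecture's cpufreq code:

	/*
	 * Hypothetical arch override of the new hook.  The per-cpu
	 * frequency variables are assumptions for illustration only,
	 * not existing kernel symbols.
	 */
	unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
	{
		unsigned long cur = per_cpu(arch_cpu_cur_freq_khz, cpu);
		unsigned long max = per_cpu(arch_cpu_max_freq_khz, cpu);

		if (!max)
			return SCHED_LOAD_SCALE;	/* no data: same as the default */

		/* Report capacity proportional to current/max frequency. */
		return SCHED_LOAD_SCALE * cur / max;
	}

A return value of SCHED_LOAD_SCALE means "running at full speed"; smaller values shrink the group's cpu_power accordingly in update_cpu_power(), as the diff below shows.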
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	21
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f0ccb8b926c8..c210321adcb9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3552,7 +3552,18 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
-unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+
+unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return SCHED_LOAD_SCALE;
+}
+
+unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+	return default_scale_freq_power(sd, cpu);
+}
+
+unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
 {
 	unsigned long weight = cpumask_weight(sched_domain_span(sd));
 	unsigned long smt_gain = sd->smt_gain;
@@ -3562,6 +3573,11 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
 	return smt_gain;
 }
 
+unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+{
+	return default_scale_smt_power(sd, cpu);
+}
+
 unsigned long scale_rt_power(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -3586,7 +3602,8 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 	unsigned long power = SCHED_LOAD_SCALE;
 	struct sched_group *sdg = sd->groups;
 
-	/* here we could scale based on cpufreq */
+	power *= arch_scale_freq_power(sd, cpu);
+	power >>= SCHED_LOAD_SHIFT;
 
 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
 		power *= arch_scale_smt_power(sd, cpu);
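For reference on the arithmetic added to update_cpu_power(): both power and the hook's return value are fixed-point quantities scaled by SCHED_LOAD_SCALE (1 << SCHED_LOAD_SHIFT, i.e. 1024 at this point in the tree), so the product has to be shifted back down by SCHED_LOAD_SHIFT. A minimal userspace sketch of the same calculation, with the constants copied locally for illustration only:

	#include <stdio.h>

	/* Local copies of the scheduler constants, for illustration only. */
	#define SCHED_LOAD_SHIFT	10
	#define SCHED_LOAD_SCALE	(1UL << SCHED_LOAD_SHIFT)

	int main(void)
	{
		/* Assume the arch hook reports half of the maximum frequency. */
		unsigned long freq = SCHED_LOAD_SCALE / 2;	/* 512 */
		unsigned long power = SCHED_LOAD_SCALE;		/* 1024 */

		power *= freq;			/* 1024 * 512 = 524288 */
		power >>= SCHED_LOAD_SHIFT;	/* 524288 >> 10 = 512 */

		printf("cpu_power = %lu\n", power);
		return 0;
	}

So a CPU reported at half speed ends up with half the nominal cpu_power, while a hook that returns SCHED_LOAD_SCALE (the default) leaves power unchanged.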