aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-09-03 07:20:03 -0400
committerIngo Molnar <mingo@elte.hu>2009-09-15 10:51:28 -0400
commit8e6598af3f35629c37249a610cf13e73f70db279 (patch)
treea341d416329f5331e4f7c6834d637266ba389b78 /kernel/sched.c
parent47fe38fcff0517e67d395c039d2e26d2de688a60 (diff)
sched: Feature to disable APERF/MPERF cpu_power
I suspect a feed-back loop between cpuidle and the aperf/mperf cpu_power bits, where when we have idle C-states lower the ratio, which leads to lower cpu_power and then less load, which generates more idle time, etc.. Put in a knob to disable it. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <new-submission> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- kernel/sched.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c210321adcb9..e8e603bf8761 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3602,11 +3602,19 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 	unsigned long power = SCHED_LOAD_SCALE;
 	struct sched_group *sdg = sd->groups;
 
-	power *= arch_scale_freq_power(sd, cpu);
+	if (sched_feat(ARCH_POWER))
+		power *= arch_scale_freq_power(sd, cpu);
+	else
+		power *= default_scale_freq_power(sd, cpu);
+
 	power >>= SCHED_LOAD_SHIFT;
 
 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-		power *= arch_scale_smt_power(sd, cpu);
+		if (sched_feat(ARCH_POWER))
+			power *= arch_scale_smt_power(sd, cpu);
+		else
+			power *= default_scale_smt_power(sd, cpu);
+
 		power >>= SCHED_LOAD_SHIFT;
 	}
 