author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-09-03 07:20:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-15 10:51:28 -0400
commit	8e6598af3f35629c37249a610cf13e73f70db279 (patch)
tree	a341d416329f5331e4f7c6834d637266ba389b78 /kernel
parent	47fe38fcff0517e67d395c039d2e26d2de688a60 (diff)
sched: Feature to disable APERF/MPERF cpu_power
I suspect a feedback loop between cpuidle and the APERF/MPERF cpu_power bits:
idle C-states lower the ratio, which leads to lower cpu_power and therefore
less load placed on the CPU, which generates more idle time, etc.

Put in a knob to disable it.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	12
-rw-r--r--	kernel/sched_features.h	5
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c210321adcb..e8e603bf876 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3602,11 +3602,19 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 	unsigned long power = SCHED_LOAD_SCALE;
 	struct sched_group *sdg = sd->groups;
 
-	power *= arch_scale_freq_power(sd, cpu);
+	if (sched_feat(ARCH_POWER))
+		power *= arch_scale_freq_power(sd, cpu);
+	else
+		power *= default_scale_freq_power(sd, cpu);
+
 	power >>= SCHED_LOAD_SHIFT;
 
 	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
-		power *= arch_scale_smt_power(sd, cpu);
+		if (sched_feat(ARCH_POWER))
+			power *= arch_scale_smt_power(sd, cpu);
+		else
+			power *= default_scale_smt_power(sd, cpu);
+
 		power >>= SCHED_LOAD_SHIFT;
 	}
 
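For context, a minimal sketch of the generic fallbacks selected when ARCH_POWER
is off. It assumes the default_scale_freq_power()/default_scale_smt_power()
helpers that kernel/sched.c already provides at this point; their exact bodies
may differ slightly in the tree this patch applies to:

/* Sketch only: no frequency-invariant data, so report full capacity. */
static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return SCHED_LOAD_SCALE;
}

/* Sketch only: split the domain's SMT gain evenly across its threads. */
static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
	unsigned long weight = cpumask_weight(sched_domain_span(sd));
	unsigned long smt_gain = sd->smt_gain;

	smt_gain /= weight;

	return smt_gain;
}

Either way the result is still shifted down by SCHED_LOAD_SHIFT, so with
ARCH_POWER disabled cpu_power falls back to the plain SCHED_LOAD_SCALE-based
value and the suspected APERF/MPERF feedback is taken out of the loop.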
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index e98c2e8de1d..294e10edd3c 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -82,6 +82,11 @@ SCHED_FEAT(LAST_BUDDY, 1)
  */
 SCHED_FEAT(CACHE_HOT_BUDDY, 1)
 
+/*
+ * Use arch dependent cpu power functions
+ */
+SCHED_FEAT(ARCH_POWER, 0)
+
 SCHED_FEAT(HRTICK, 0)
 SCHED_FEAT(DOUBLE_TICK, 0)
 SCHED_FEAT(LB_BIAS, 1)
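The new entry is picked up by the existing sched_features machinery in
kernel/sched.c. The sketch below shows the assumed shape of that machinery in
this era's tree (reproduced from memory, so treat it as a sketch rather than a
verbatim excerpt): sched_features.h is included several times with different
SCHED_FEAT() definitions, and sched_feat(ARCH_POWER) ends up as a bit test.

/* 1) one enum bit position per SCHED_FEAT() line ... */
#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name ,
enum {
#include "sched_features.h"
};
#undef SCHED_FEAT

/* 2) ... the default values folded into a bitmask ... */
#define SCHED_FEAT(name, enabled)	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;
#undef SCHED_FEAT

/* 3) ... so sched_feat(ARCH_POWER) is a simple bit test (0 by default here). */
#define sched_feat(x)	(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

With CONFIG_SCHED_DEBUG enabled, the knob should also be togglable at run time
by writing ARCH_POWER or NO_ARCH_POWER to /sys/kernel/debug/sched_features;
since the default here is 0, the APERF/MPERF scaling stays off until it is
explicitly requested.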