author	Dirk Brandewie <dirk.j.brandewie@intel.com>	2014-02-03 11:55:31 -0500
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-02-04 15:41:15 -0500
commit	fcb6a15c2e7e76d493e6f91ea889ab40e1c643a4 (patch)
tree	7522e9ed862c6a149df7893a98abb96cd9ec23c4 /drivers/cpufreq
parent	38dbfb59d1175ef458d006556061adeaa8751b72 (diff)
intel_pstate: Take core C0 time into account for core busy calculation
Take non-idle time into account when calculating core busy time.
This ensures that intel_pstate will notice a decrease in load.

References: https://bugzilla.kernel.org/show_bug.cgi?id=66581
Cc: 3.10+ <stable@vger.kernel.org> # 3.10+
Signed-off-by: Dirk Brandewie <dirk.j.brandewie@intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
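The heart of the change: the old code treated APERF/MPERF (how fast the core ran while awake) as the busy percentage, so a core that was fast but mostly idle still looked fully loaded. The patch scales that ratio by MPERF/TSC, the fraction of the sample window the core actually spent in the non-idle C0 state. A minimal userspace sketch of the arithmetic, using hypothetical counter deltas and plain integer math in place of the driver's fixed-point helpers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical per-sample counter deltas, not taken from the patch. */
	uint64_t aperf = 800000;	/* cycles at the delivered frequency  */
	uint64_t mperf = 1000000;	/* cycles at the guaranteed frequency */
	uint64_t tsc   = 4000000;	/* reference (wall-clock) cycles      */

	/* APERF/MPERF: how fast the core ran while it was not idle. */
	uint64_t core_pct = aperf * 100 / mperf;

	/* MPERF/TSC: fraction of the interval actually spent in C0.
	 * The pre-patch code ignored this factor entirely. */
	uint64_t c0_pct = mperf * 100 / tsc;

	/* Scale busyness by C0 residency; the +1 mirrors the patch and
	 * keeps the multiplier from collapsing to zero. */
	uint64_t busy = core_pct * (c0_pct + 1) / 100;

	printf("core_pct=%llu%% c0_pct=%llu%% busy=%llu%%\n",
	       (unsigned long long)core_pct,
	       (unsigned long long)c0_pct,
	       (unsigned long long)busy);
	return 0;
}

With these numbers the old formula reports 80% busy; weighting by the 25% C0 residency drops that to about 20%, which is what lets the governor notice the decrease in load.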
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/intel_pstate.c | 21 +++++++++++++++++----
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7e257b233602..79606f473f48 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -57,6 +57,7 @@ struct sample {
 	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
+	unsigned long long tsc;
 	int freq;
 };
 
@@ -96,6 +97,7 @@ struct cpudata {
 
 	u64 prev_aperf;
 	u64 prev_mperf;
+	unsigned long long prev_tsc;
 	int sample_ptr;
 	struct sample samples[SAMPLE_COUNT];
 };
@@ -548,30 +550,41 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
 	u64 core_pct;
-	core_pct = div64_u64(int_tofp(sample->aperf * 100),
-			     sample->mperf);
-	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+	u64 c0_pct;
 
-	sample->core_pct_busy = core_pct;
+	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+
+	c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+	sample->freq = fp_toint(
+		mul_fp(int_tofp(cpu->pstate.max_pstate),
+			int_tofp(core_pct * 1000)));
+
+	sample->core_pct_busy = mul_fp(int_tofp(core_pct),
+				div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
 	u64 aperf, mperf;
+	unsigned long long tsc;
 
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
+	tsc = native_read_tsc();
 
 	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
 	cpu->samples[cpu->sample_ptr].aperf = aperf;
 	cpu->samples[cpu->sample_ptr].mperf = mperf;
+	cpu->samples[cpu->sample_ptr].tsc = tsc;
 	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
 	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+	cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
 
 	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
 
 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
+	cpu->prev_tsc = tsc;
 }
 
 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
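For context: the int_tofp/fp_toint/mul_fp/div_fp helpers the patched lines rely on are fixed-point utilities defined near the top of intel_pstate.c. A standalone sketch, assuming the 8-bit FRAC_BITS this era of the driver uses (the kernel's div_fp routes through div_s64; plain 64-bit division stands in for it here), reproduces the patched core_pct_busy expression for the numbers from the sketch above:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

/* Multiply two 8.8 fixed-point values. */
static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

/* Divide two 8.8 fixed-point values (plain division in place of
 * the kernel's div_s64). */
static int32_t div_fp(int32_t x, int32_t y)
{
	return ((int64_t)x << FRAC_BITS) / y;
}

int main(void)
{
	/* core_pct = 80, c0_pct = 25, matching the sketch above. */
	int32_t busy = mul_fp(int_tofp(80),
			      div_fp(int_tofp(25 + 1), int_tofp(100)));

	printf("core_pct_busy = %d (8.8 fixed point) = %d%%\n",
	       (int)busy, (int)fp_toint(busy));
	return 0;
}

This prints 5280 in 8.8 fixed point, about 20.6%, versus the unscaled 80% the pre-patch formula would have stored in core_pct_busy.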