diff options
author | Philippe Longepe <philippe.longepe@linux.intel.com> | 2016-03-06 02:34:05 -0500 |
---|---|---|
committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-03-10 18:04:58 -0500 |
commit | 7349ec0470b62820ae226e30770b9d84a53ced9d (patch) | |
tree | c0ee221a93cae01c3f644203360890dc5b30053b | |
parent | a158bed5dc92bd83338225135d448958e0b3745d (diff) |
intel_pstate: Move intel_pstate_calc_busy() into get_target_pstate_use_performance()
The cpu_load algorithm doesn't need to invoke intel_pstate_calc_busy(),
so move that call from intel_pstate_sample() to
get_target_pstate_use_performance().
Signed-off-by: Philippe Longepe <philippe.longepe@linux.intel.com>
Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r-- | drivers/cpufreq/intel_pstate.c | 5 |
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 5b5bfc1c90f1..95cc21713bb4 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -915,8 +915,6 @@ static inline void intel_pstate_sample(struct cpudata *cpu, u64 time) | |||
915 | cpu->sample.mperf -= cpu->prev_mperf; | 915 | cpu->sample.mperf -= cpu->prev_mperf; |
916 | cpu->sample.tsc -= cpu->prev_tsc; | 916 | cpu->sample.tsc -= cpu->prev_tsc; |
917 | 917 | ||
918 | intel_pstate_calc_busy(cpu); | ||
919 | |||
920 | cpu->prev_aperf = aperf; | 918 | cpu->prev_aperf = aperf; |
921 | cpu->prev_mperf = mperf; | 919 | cpu->prev_mperf = mperf; |
922 | cpu->prev_tsc = tsc; | 920 | cpu->prev_tsc = tsc; |
@@ -945,7 +943,6 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) | |||
945 | mperf = cpu->sample.mperf + delta_iowait_mperf; | 943 | mperf = cpu->sample.mperf + delta_iowait_mperf; |
946 | cpu->prev_cummulative_iowait = cummulative_iowait; | 944 | cpu->prev_cummulative_iowait = cummulative_iowait; |
947 | 945 | ||
948 | |||
949 | /* | 946 | /* |
950 | * The load can be estimated as the ratio of the mperf counter | 947 | * The load can be estimated as the ratio of the mperf counter |
951 | * running at a constant frequency during active periods | 948 | * running at a constant frequency during active periods |
@@ -963,6 +960,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) | |||
963 | int32_t core_busy, max_pstate, current_pstate, sample_ratio; | 960 | int32_t core_busy, max_pstate, current_pstate, sample_ratio; |
964 | u64 duration_ns; | 961 | u64 duration_ns; |
965 | 962 | ||
963 | intel_pstate_calc_busy(cpu); | ||
964 | |||
966 | /* | 965 | /* |
967 | * core_busy is the ratio of actual performance to max | 966 | * core_busy is the ratio of actual performance to max |
968 | * max_pstate is the max non turbo pstate available | 967 | * max_pstate is the max non turbo pstate available |