diff options
| author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2017-03-27 18:22:16 -0400 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2017-03-28 17:12:16 -0400 |
| commit | 2bfc4cbb5fd3848669f1b95fea793f63d8e77fa0 (patch) | |
| tree | 2bd87001b339a4062d78060edb5ad71330165eff | |
| parent | 8ca6ce37014e5a9b127fc076448eb95e2b366d05 (diff) | |
cpufreq: intel_pstate: Do not walk policy->cpus
intel_pstate_hwp_set() is the only function walking policy->cpus
in intel_pstate. The rest of the code simply assumes one CPU per
policy, including the initialization code.
Therefore it doesn't make sense for intel_pstate_hwp_set() to
walk policy->cpus as it is guaranteed to have only one bit set
for policy->cpu.
For this reason, rearrange intel_pstate_hwp_set() to take the CPU
number as the argument and drop the loop over policy->cpus from it.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
| -rw-r--r-- | drivers/cpufreq/intel_pstate.c | 124 |
1 file changed, 60 insertions, 64 deletions
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6384557cea69..5236701958d0 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -792,84 +792,80 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { | |||
| 792 | NULL, | 792 | NULL, |
| 793 | }; | 793 | }; |
| 794 | 794 | ||
| 795 | static void intel_pstate_hwp_set(struct cpufreq_policy *policy) | 795 | static void intel_pstate_hwp_set(unsigned int cpu) |
| 796 | { | 796 | { |
| 797 | int min, hw_min, max, hw_max, cpu; | 797 | struct cpudata *cpu_data = all_cpu_data[cpu]; |
| 798 | int min, hw_min, max, hw_max; | ||
| 798 | u64 value, cap; | 799 | u64 value, cap; |
| 800 | s16 epp; | ||
| 799 | 801 | ||
| 800 | for_each_cpu(cpu, policy->cpus) { | 802 | rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); |
| 801 | struct cpudata *cpu_data = all_cpu_data[cpu]; | 803 | hw_min = HWP_LOWEST_PERF(cap); |
| 802 | s16 epp; | 804 | if (global.no_turbo) |
| 803 | 805 | hw_max = HWP_GUARANTEED_PERF(cap); | |
| 804 | rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); | 806 | else |
| 805 | hw_min = HWP_LOWEST_PERF(cap); | 807 | hw_max = HWP_HIGHEST_PERF(cap); |
| 806 | if (global.no_turbo) | ||
| 807 | hw_max = HWP_GUARANTEED_PERF(cap); | ||
| 808 | else | ||
| 809 | hw_max = HWP_HIGHEST_PERF(cap); | ||
| 810 | |||
| 811 | max = fp_ext_toint(hw_max * cpu_data->max_perf); | ||
| 812 | if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) | ||
| 813 | min = max; | ||
| 814 | else | ||
| 815 | min = fp_ext_toint(hw_max * cpu_data->min_perf); | ||
| 816 | 808 | ||
| 817 | rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); | 809 | max = fp_ext_toint(hw_max * cpu_data->max_perf); |
| 810 | if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) | ||
| 811 | min = max; | ||
| 812 | else | ||
| 813 | min = fp_ext_toint(hw_max * cpu_data->min_perf); | ||
| 818 | 814 | ||
| 819 | value &= ~HWP_MIN_PERF(~0L); | 815 | rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); |
| 820 | value |= HWP_MIN_PERF(min); | ||
| 821 | 816 | ||
| 822 | value &= ~HWP_MAX_PERF(~0L); | 817 | value &= ~HWP_MIN_PERF(~0L); |
| 823 | value |= HWP_MAX_PERF(max); | 818 | value |= HWP_MIN_PERF(min); |
| 824 | 819 | ||
| 825 | if (cpu_data->epp_policy == cpu_data->policy) | 820 | value &= ~HWP_MAX_PERF(~0L); |
| 826 | goto skip_epp; | 821 | value |= HWP_MAX_PERF(max); |
| 827 | 822 | ||
| 828 | cpu_data->epp_policy = cpu_data->policy; | 823 | if (cpu_data->epp_policy == cpu_data->policy) |
| 824 | goto skip_epp; | ||
| 829 | 825 | ||
| 830 | if (cpu_data->epp_saved >= 0) { | 826 | cpu_data->epp_policy = cpu_data->policy; |
| 831 | epp = cpu_data->epp_saved; | ||
| 832 | cpu_data->epp_saved = -EINVAL; | ||
| 833 | goto update_epp; | ||
| 834 | } | ||
| 835 | 827 | ||
| 836 | if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { | 828 | if (cpu_data->epp_saved >= 0) { |
| 837 | epp = intel_pstate_get_epp(cpu_data, value); | 829 | epp = cpu_data->epp_saved; |
| 838 | cpu_data->epp_powersave = epp; | 830 | cpu_data->epp_saved = -EINVAL; |
| 839 | /* If EPP read was failed, then don't try to write */ | 831 | goto update_epp; |
| 840 | if (epp < 0) | 832 | } |
| 841 | goto skip_epp; | ||
| 842 | 833 | ||
| 834 | if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { | ||
| 835 | epp = intel_pstate_get_epp(cpu_data, value); | ||
| 836 | cpu_data->epp_powersave = epp; | ||
| 837 | /* If EPP read was failed, then don't try to write */ | ||
| 838 | if (epp < 0) | ||
| 839 | goto skip_epp; | ||
| 843 | 840 | ||
| 844 | epp = 0; | 841 | epp = 0; |
| 845 | } else { | 842 | } else { |
| 846 | /* skip setting EPP, when saved value is invalid */ | 843 | /* skip setting EPP, when saved value is invalid */ |
| 847 | if (cpu_data->epp_powersave < 0) | 844 | if (cpu_data->epp_powersave < 0) |
| 848 | goto skip_epp; | 845 | goto skip_epp; |
| 849 | 846 | ||
| 850 | /* | 847 | /* |
| 851 | * No need to restore EPP when it is not zero. This | 848 | * No need to restore EPP when it is not zero. This |
| 852 | * means: | 849 | * means: |
| 853 | * - Policy is not changed | 850 | * - Policy is not changed |
| 854 | * - user has manually changed | 851 | * - user has manually changed |
| 855 | * - Error reading EPB | 852 | * - Error reading EPB |
| 856 | */ | 853 | */ |
| 857 | epp = intel_pstate_get_epp(cpu_data, value); | 854 | epp = intel_pstate_get_epp(cpu_data, value); |
| 858 | if (epp) | 855 | if (epp) |
| 859 | goto skip_epp; | 856 | goto skip_epp; |
| 860 | 857 | ||
| 861 | epp = cpu_data->epp_powersave; | 858 | epp = cpu_data->epp_powersave; |
| 862 | } | 859 | } |
| 863 | update_epp: | 860 | update_epp: |
| 864 | if (static_cpu_has(X86_FEATURE_HWP_EPP)) { | 861 | if (static_cpu_has(X86_FEATURE_HWP_EPP)) { |
| 865 | value &= ~GENMASK_ULL(31, 24); | 862 | value &= ~GENMASK_ULL(31, 24); |
| 866 | value |= (u64)epp << 24; | 863 | value |= (u64)epp << 24; |
| 867 | } else { | 864 | } else { |
| 868 | intel_pstate_set_epb(cpu, epp); | 865 | intel_pstate_set_epb(cpu, epp); |
| 869 | } | ||
| 870 | skip_epp: | ||
| 871 | wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); | ||
| 872 | } | 866 | } |
| 867 | skip_epp: | ||
| 868 | wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); | ||
| 873 | } | 869 | } |
| 874 | 870 | ||
| 875 | static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) | 871 | static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy) |
| @@ -892,7 +888,7 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) | |||
| 892 | mutex_lock(&intel_pstate_limits_lock); | 888 | mutex_lock(&intel_pstate_limits_lock); |
| 893 | 889 | ||
| 894 | all_cpu_data[policy->cpu]->epp_policy = 0; | 890 | all_cpu_data[policy->cpu]->epp_policy = 0; |
| 895 | intel_pstate_hwp_set(policy); | 891 | intel_pstate_hwp_set(policy->cpu); |
| 896 | 892 | ||
| 897 | mutex_unlock(&intel_pstate_limits_lock); | 893 | mutex_unlock(&intel_pstate_limits_lock); |
| 898 | 894 | ||
| @@ -2057,7 +2053,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 2057 | intel_pstate_set_update_util_hook(policy->cpu); | 2053 | intel_pstate_set_update_util_hook(policy->cpu); |
| 2058 | 2054 | ||
| 2059 | if (hwp_active) | 2055 | if (hwp_active) |
| 2060 | intel_pstate_hwp_set(policy); | 2056 | intel_pstate_hwp_set(policy->cpu); |
| 2061 | 2057 | ||
| 2062 | mutex_unlock(&intel_pstate_limits_lock); | 2058 | mutex_unlock(&intel_pstate_limits_lock); |
| 2063 | 2059 | ||
