aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-03-17 20:25:14 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-03-17 20:25:14 -0400
commit8d940990f51df8b18b89f8ca0592904cfe02f556 (patch)
tree6464e7f7de21cd7e2aee9df7f84548197d4af30d
parent8841b5f0cdc71a0b8e42ef93d6eee9a081c67309 (diff)
parent8b766e05d8ec80b6b1daa2675509adcab6519038 (diff)
Merge tag 'pm-4.11-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management fixes from Rafael Wysocki: "These fix a few more intel_pstate issues and one small issue in the cpufreq core. Specifics: - Fix breakage in the intel_pstate's debugfs interface for PID controller tuning (Rafael Wysocki) - Fix computations related to P-state limits in intel_pstate to avoid excessive rounding errors leading to visible inaccuracies (Srinivas Pandruvada, Rafael Wysocki) - Add a missing newline to a message printed by one function in the cpufreq core and clean up that function (Rafael Wysocki)" * tag 'pm-4.11-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: cpufreq: Fix and clean up show_cpuinfo_cur_freq() cpufreq: intel_pstate: Avoid percentages in limits-related computations cpufreq: intel_pstate: Correct frequency setting in the HWP mode cpufreq: intel_pstate: Update pid_params.sample_rate_ns in pid_param_set()
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/intel_pstate.c64
2 files changed, 36 insertions, 36 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 38b9fdf854a4..b8ff617d449d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 					char *buf)
 {
 	unsigned int cur_freq = __cpufreq_get(policy);
-	if (!cur_freq)
-		return sprintf(buf, "<unknown>");
-	return sprintf(buf, "%u\n", cur_freq);
+
+	if (cur_freq)
+		return sprintf(buf, "%u\n", cur_freq);
+
+	return sprintf(buf, "<unknown>\n");
 }
 
 /**
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 3d37219a0dd7..08e134ffba68 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
 	return div64_u64(x << EXT_FRAC_BITS, y);
 }
 
+static inline int32_t percent_ext_fp(int percent)
+{
+	return div_ext_fp(percent, 100);
+}
+
 /**
  * struct sample -	Store performance sample
  * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
@@ -845,12 +850,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 
 static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
-	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	int min, hw_min, max, hw_max, cpu;
 	struct perf_limits *perf_limits = limits;
 	u64 value, cap;
 
 	for_each_cpu(cpu, policy->cpus) {
-		int max_perf_pct, min_perf_pct;
 		struct cpudata *cpu_data = all_cpu_data[cpu];
 		s16 epp;
 
@@ -863,20 +867,15 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 			hw_max = HWP_GUARANTEED_PERF(cap);
 		else
 			hw_max = HWP_HIGHEST_PERF(cap);
-		range = hw_max - hw_min;
 
-		max_perf_pct = perf_limits->max_perf_pct;
-		min_perf_pct = perf_limits->min_perf_pct;
+		min = fp_ext_toint(hw_max * perf_limits->min_perf);
 
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		adj_range = min_perf_pct * range / 100;
-		min = hw_min + adj_range;
+
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		adj_range = max_perf_pct * range / 100;
-		max = hw_min + adj_range;
-
+		max = fp_ext_toint(hw_max * perf_limits->max_perf);
 		value &= ~HWP_MAX_PERF(~0L);
 		value |= HWP_MAX_PERF(max);
 
@@ -989,6 +988,7 @@ static void intel_pstate_update_policies(void)
 static int pid_param_set(void *data, u64 val)
 {
 	*(u32 *)data = val;
+	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
 	intel_pstate_reset_all_pid();
 	return 0;
 }
@@ -1225,7 +1225,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 				   limits->max_perf_pct);
 	limits->max_perf_pct = max(limits->min_perf_pct,
 				   limits->max_perf_pct);
-	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
+	limits->max_perf = percent_ext_fp(limits->max_perf_pct);
 
 	intel_pstate_update_policies();
 
@@ -1262,7 +1262,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 				   limits->min_perf_pct);
 	limits->min_perf_pct = min(limits->max_perf_pct,
 				   limits->min_perf_pct);
-	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
+	limits->min_perf = percent_ext_fp(limits->min_perf_pct);
 
 	intel_pstate_update_policies();
 
@@ -2080,36 +2080,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
 static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 					    struct perf_limits *limits)
 {
+	int32_t max_policy_perf, min_policy_perf;
 
-	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
-					      policy->cpuinfo.max_freq);
-	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+	max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+	max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
 	if (policy->max == policy->min) {
-		limits->min_policy_pct = limits->max_policy_pct;
+		min_policy_perf = max_policy_perf;
 	} else {
-		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
-						      policy->cpuinfo.max_freq);
-		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
-						 0, 100);
+		min_policy_perf = div_ext_fp(policy->min,
+					     policy->cpuinfo.max_freq);
+		min_policy_perf = clamp_t(int32_t, min_policy_perf,
+					  0, max_policy_perf);
 	}
 
-	/* Normalize user input to [min_policy_pct, max_policy_pct] */
-	limits->min_perf_pct = max(limits->min_policy_pct,
-				   limits->min_sysfs_pct);
-	limits->min_perf_pct = min(limits->max_policy_pct,
-				   limits->min_perf_pct);
-	limits->max_perf_pct = min(limits->max_policy_pct,
-				   limits->max_sysfs_pct);
-	limits->max_perf_pct = max(limits->min_policy_pct,
-				   limits->max_perf_pct);
+	/* Normalize user input to [min_perf, max_perf] */
+	limits->min_perf = max(min_policy_perf,
+			       percent_ext_fp(limits->min_sysfs_pct));
+	limits->min_perf = min(limits->min_perf, max_policy_perf);
+	limits->max_perf = min(max_policy_perf,
+			       percent_ext_fp(limits->max_sysfs_pct));
+	limits->max_perf = max(min_policy_perf, limits->max_perf);
 
-	/* Make sure min_perf_pct <= max_perf_pct */
-	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+	/* Make sure min_perf <= max_perf */
+	limits->min_perf = min(limits->min_perf, limits->max_perf);
 
-	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
 	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
 	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+	limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
+	limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
 
 	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
 		 limits->max_perf_pct, limits->min_perf_pct);