author    Linus Torvalds <torvalds@linux-foundation.org>    2017-03-23 23:00:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-03-23 23:00:39 -0400
commit    ebe64824e9de4b3ab3bd3928312b4b2bc57b4b7e (patch)
tree      0ecbd5d85b3ed791168eca65a60ba7aade617a09
parent    02a2cad8e83817524cd4e14fc1c68c8c94768723 (diff)
parent    90ff2b729e181fba628b854eac2097e5ccc22e66 (diff)
Merge tag 'pm-4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management fixes from Rafael Wysocki:
 "One of these is an intel_pstate regression fix and it is not a small
  change, but it mostly removes code that shouldn't be there. That code
  was acquired by mistake and has been a source of constant pain since
  then, so the time has come to get rid of it finally. We have not seen
  problems with this change in the lab, so fingers crossed.

  The rest is more usual: one more intel_pstate commit removing useless
  code, a cpufreq core fix to make it restore policy limits on CPU
  online (which prevents the limits from being reset over system
  suspend/resume), a schedutil cpufreq governor initialization fix to
  make it actually work as advertised on all systems and an extra
  sanity check in the cpuidle core to prevent crashes from happening if
  the arch code messes things up.

  Specifics:

   - Make intel_pstate use one set of global P-state limits in the
     active mode regardless of the scaling_governor settings for
     individual CPUs instead of switching back and forth between two of
     them in a way that is hard to control (Rafael Wysocki).

   - Drop a useless function from intel_pstate to prevent it from
     modifying the maximum supported frequency value unexpectedly which
     may confuse the cpufreq core (Rafael Wysocki).

   - Fix the cpufreq core to restore policy limits on CPU online so
     that the limits are not reset over system suspend/resume, among
     other things (Viresh Kumar).

   - Fix the initialization of the schedutil cpufreq governor to make
     the IO-wait boosting mechanism in it actually work on systems with
     one CPU per cpufreq policy (Rafael Wysocki).

   - Add a sanity check to the cpuidle core to prevent crashes from
     happening if the architecture code initialization fails to set up
     things as expected (Vaidyanathan Srinivasan)"

* tag 'pm-4.11-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: Restore policy min/max limits on CPU online
  cpuidle: Validate cpu_dev in cpuidle_add_sysfs()
  cpufreq: intel_pstate: Fix policy data management in passive mode
  cpufreq: schedutil: Fix per-CPU structure initialization in sugov_start()
  cpufreq: intel_pstate: One set of global limits in active mode
-rw-r--r--  drivers/cpufreq/cpufreq.c          |   3
-rw-r--r--  drivers/cpufreq/intel_pstate.c     | 167
-rw-r--r--  drivers/cpuidle/sysfs.c            |  12
-rw-r--r--  kernel/sched/cpufreq_schedutil.c   |  20
4 files changed, 72 insertions(+), 130 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b8ff617d449d..5dbdd261aa73 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1184,6 +1184,9 @@ static int cpufreq_online(unsigned int cpu)
 		for_each_cpu(j, policy->related_cpus)
 			per_cpu(cpufreq_cpu_data, j) = policy;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	} else {
+		policy->min = policy->user_policy.min;
+		policy->max = policy->user_policy.max;
 	}
 
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
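The hunk above works because the cpufreq core already tracks the user-set bounds separately in policy->user_policy; when cpufreq_online() reuses an existing policy, it now copies those saved values back instead of keeping whatever the limits were reset to while the CPU was offline. A minimal userspace sketch of the same save/restore idea (the struct and field names are simplified stand-ins, not the kernel API):

#include <stdio.h>

/* Simplified stand-in for the relevant struct cpufreq_policy fields. */
struct policy {
	unsigned int min, max;                          /* active limits */
	struct { unsigned int min, max; } user_policy;  /* user-set limits */
};

/* Going offline can clobber the active limits; on online, restore them
 * from the saved user settings instead of falling back to defaults. */
static void restore_on_online(struct policy *p)
{
	p->min = p->user_policy.min;
	p->max = p->user_policy.max;
}

int main(void)
{
	struct policy p = {
		.min = 800000, .max = 3400000,          /* reset defaults */
		.user_policy = { .min = 1200000, .max = 2000000 },
	};

	restore_on_online(&p);
	printf("restored: min=%u max=%u\n", p.min, p.max);
	return 0;
}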
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 08e134ffba68..283491f742d3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -364,9 +364,7 @@ static bool driver_registered __read_mostly;
 static bool acpi_ppc;
 #endif
 
-static struct perf_limits performance_limits;
-static struct perf_limits powersave_limits;
-static struct perf_limits *limits;
+static struct perf_limits global;
 
 static void intel_pstate_init_limits(struct perf_limits *limits)
 {
@@ -377,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
 	limits->max_sysfs_pct = 100;
 }
 
-static void intel_pstate_set_performance_limits(struct perf_limits *limits)
-{
-	intel_pstate_init_limits(limits);
-	limits->min_perf_pct = 100;
-	limits->min_perf = int_ext_tofp(1);
-	limits->min_sysfs_pct = 100;
-}
-
 static DEFINE_MUTEX(intel_pstate_driver_lock);
 static DEFINE_MUTEX(intel_pstate_limits_lock);
 
@@ -507,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 	 * correct max turbo frequency based on the turbo state.
 	 * Also need to convert to MHz as _PSS freq is in MHz.
 	 */
-	if (!limits->turbo_disabled)
+	if (!global.turbo_disabled)
 		cpu->acpi_perf_data.states[0].core_frequency =
 			policy->cpuinfo.max_freq / 1000;
 	cpu->valid_pss_table = true;
@@ -626,7 +616,7 @@ static inline void update_turbo_state(void)
626 616
627 cpu = all_cpu_data[0]; 617 cpu = all_cpu_data[0];
628 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); 618 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
629 limits->turbo_disabled = 619 global.turbo_disabled =
630 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || 620 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
631 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 621 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
632} 622}
@@ -851,7 +841,7 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
 static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 {
 	int min, hw_min, max, hw_max, cpu;
-	struct perf_limits *perf_limits = limits;
+	struct perf_limits *perf_limits = &global;
 	u64 value, cap;
 
 	for_each_cpu(cpu, policy->cpus) {
@@ -863,19 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
 
 		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
 		hw_min = HWP_LOWEST_PERF(cap);
-		if (limits->no_turbo)
+		if (global.no_turbo)
 			hw_max = HWP_GUARANTEED_PERF(cap);
 		else
 			hw_max = HWP_HIGHEST_PERF(cap);
 
-		min = fp_ext_toint(hw_max * perf_limits->min_perf);
+		max = fp_ext_toint(hw_max * perf_limits->max_perf);
+		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+			min = max;
+		else
+			min = fp_ext_toint(hw_max * perf_limits->min_perf);
 
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
 
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		max = fp_ext_toint(hw_max * perf_limits->max_perf);
 		value &= ~HWP_MAX_PERF(~0L);
 		value |= HWP_MAX_PERF(max);
 
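The reordering above computes max before min so that the performance policy can pin the floor to the ceiling (min = max), which keeps HWP from ever requesting less than full performance on those CPUs. A toy fixed-point sketch of that computation (FP_ONE and fp_toint() are illustrative stand-ins for the driver's extended fixed-point helpers):

#include <stdio.h>

#define FP_ONE 256	/* toy fixed point scaled by 256 */

static int fp_toint(long v) { return (int)(v / FP_ONE); }

int main(void)
{
	int hw_max = 36;		/* highest HWP performance level */
	long min_perf = FP_ONE / 4;	/* 25% */
	long max_perf = FP_ONE;		/* 100% */
	int performance_policy = 1;	/* CPUFREQ_POLICY_PERFORMANCE */

	int max = fp_toint(hw_max * max_perf);
	/* Performance policy raises the floor to the ceiling, so the
	 * resulting HWP request never asks for less than the maximum. */
	int min = performance_policy ? max : fp_toint(hw_max * min_perf);

	printf("HWP request: min=%d max=%d\n", min, max);
	return 0;
}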
@@ -968,20 +961,11 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
 }
 
 static void intel_pstate_update_policies(void)
-	__releases(&intel_pstate_limits_lock)
-	__acquires(&intel_pstate_limits_lock)
 {
-	struct perf_limits *saved_limits = limits;
 	int cpu;
 
-	mutex_unlock(&intel_pstate_limits_lock);
-
 	for_each_possible_cpu(cpu)
 		cpufreq_update_policy(cpu);
-
-	mutex_lock(&intel_pstate_limits_lock);
-
-	limits = saved_limits;
 }
 
 /************************** debugfs begin ************************/
@@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
 	static ssize_t show_##file_name					\
 	(struct kobject *kobj, struct attribute *attr, char *buf)	\
 	{								\
-		return sprintf(buf, "%u\n", limits->object);		\
+		return sprintf(buf, "%u\n", global.object);		\
 	}
 
 static ssize_t intel_pstate_show_status(char *buf);
@@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
 	}
 
 	update_turbo_state();
-	if (limits->turbo_disabled)
-		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
+	if (global.turbo_disabled)
+		ret = sprintf(buf, "%u\n", global.turbo_disabled);
 	else
-		ret = sprintf(buf, "%u\n", limits->no_turbo);
+		ret = sprintf(buf, "%u\n", global.no_turbo);
 
 	mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 	mutex_lock(&intel_pstate_limits_lock);
 
 	update_turbo_state();
-	if (limits->turbo_disabled) {
+	if (global.turbo_disabled) {
 		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
 		mutex_unlock(&intel_pstate_limits_lock);
 		mutex_unlock(&intel_pstate_driver_lock);
 		return -EPERM;
 	}
 
-	limits->no_turbo = clamp_t(int, input, 0, 1);
-
-	intel_pstate_update_policies();
+	global.no_turbo = clamp_t(int, input, 0, 1);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
+	intel_pstate_update_policies();
+
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
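Note the ordering change: global.no_turbo is written under intel_pstate_limits_lock, but intel_pstate_update_policies() now runs only after that lock is dropped. cpufreq_update_policy() re-enters the driver's set_policy path, which takes intel_pstate_limits_lock itself (see intel_pstate_set_policy() below), so calling it with the lock held would self-deadlock; the old helper worked around that by unlocking and relocking internally, which this series removes. A compact pthread sketch of the pattern (all names are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t limits_lock = PTHREAD_MUTEX_INITIALIZER;
static int no_turbo;

/* Stand-in for intel_pstate_set_policy(): re-entered by the core and
 * takes the limits lock on its own. */
static void set_policy(void)
{
	pthread_mutex_lock(&limits_lock);
	/* ... apply the global limits to this policy ... */
	pthread_mutex_unlock(&limits_lock);
}

/* Stand-in for cpufreq_update_policy() looping over all CPUs. */
static void update_policies(void) { set_policy(); }

int main(void)
{
	pthread_mutex_lock(&limits_lock);
	no_turbo = 1;			/* update the shared limits */
	pthread_mutex_unlock(&limits_lock);

	/* Call back into the core only after dropping the lock; doing it
	 * under limits_lock would deadlock on the acquire in set_policy(). */
	update_policies();
	puts("policies updated without deadlock");
	return 0;
}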
@@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits->max_perf_pct = min(limits->max_policy_pct,
-				   limits->max_sysfs_pct);
-	limits->max_perf_pct = max(limits->min_policy_pct,
-				   limits->max_perf_pct);
-	limits->max_perf_pct = max(limits->min_perf_pct,
-				   limits->max_perf_pct);
-	limits->max_perf = percent_ext_fp(limits->max_perf_pct);
-
-	intel_pstate_update_policies();
+	global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+	global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
+	global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
+	global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
+	global.max_perf = percent_ext_fp(global.max_perf_pct);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
+	intel_pstate_update_policies();
+
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
@@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
-	limits->min_perf_pct = max(limits->min_policy_pct,
-				   limits->min_sysfs_pct);
-	limits->min_perf_pct = min(limits->max_policy_pct,
-				   limits->min_perf_pct);
-	limits->min_perf_pct = min(limits->max_perf_pct,
-				   limits->min_perf_pct);
-	limits->min_perf = percent_ext_fp(limits->min_perf_pct);
-
-	intel_pstate_update_policies();
+	global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
+	global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
+	global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
+	global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
+	global.min_perf = percent_ext_fp(global.min_perf_pct);
 
 	mutex_unlock(&intel_pstate_limits_lock);
 
+	intel_pstate_update_policies();
+
 	mutex_unlock(&intel_pstate_driver_lock);
 
 	return count;
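Both stores collapse the old wrapped min()/max() chains into one clamp sequence on the single global limits structure. A standalone sketch of the max_perf_pct path with illustrative values (the kernel takes these from sysfs writes and policy limits):

#include <stdio.h>

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }
static int clamp_i(int v, int lo, int hi) { return min_i(max_i(v, lo), hi); }

int main(void)
{
	int max_policy_pct = 100, min_policy_pct = 20, min_perf_pct = 25;
	int input = 150;	/* user writes 150 to max_perf_pct */

	int max_sysfs_pct = clamp_i(input, 0, 100);
	int max_perf_pct = min_i(max_policy_pct, max_sysfs_pct);
	max_perf_pct = max_i(min_policy_pct, max_perf_pct);
	max_perf_pct = max_i(min_perf_pct, max_perf_pct);

	printf("effective max_perf_pct = %d\n", max_perf_pct);	/* 100 */
	return 0;
}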
@@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 	u32 vid;
 
 	val = (u64)pstate << 8;
-	if (limits->no_turbo && !limits->turbo_disabled)
+	if (global.no_turbo && !global.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	vid_fp = cpudata->vid.min + mul_fp(
@@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	u64 val;
 
 	val = (u64)pstate << 8;
-	if (limits->no_turbo && !limits->turbo_disabled)
+	if (global.no_turbo && !global.turbo_disabled)
 		val |= (u64)1 << 32;
 
 	return val;
@@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 	int max_perf = cpu->pstate.turbo_pstate;
 	int max_perf_adj;
 	int min_perf;
-	struct perf_limits *perf_limits = limits;
+	struct perf_limits *perf_limits = &global;
 
-	if (limits->no_turbo || limits->turbo_disabled)
+	if (global.no_turbo || global.turbo_disabled)
 		max_perf = cpu->pstate.max_pstate;
 
 	if (per_cpu_limits)
@@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 
 	sample->busy_scaled = busy_frac * 100;
 
-	target = limits->no_turbo || limits->turbo_disabled ?
+	target = global.no_turbo || global.turbo_disabled ?
 		cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	target += target >> 2;
 	target = mul_fp(target, busy_frac);
@@ -2116,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
-	struct perf_limits *perf_limits = NULL;
+	struct perf_limits *perf_limits = &global;
 
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
@@ -2139,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 	mutex_lock(&intel_pstate_limits_lock);
 
-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
-		pr_debug("set performance\n");
-		if (!perf_limits) {
-			limits = &performance_limits;
-			perf_limits = limits;
-		}
-	} else {
-		pr_debug("set powersave\n");
-		if (!perf_limits) {
-			limits = &powersave_limits;
-			perf_limits = limits;
-		}
-
-	}
-
 	intel_pstate_update_perf_limits(policy, perf_limits);
 
 	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
@@ -2177,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
-	struct perf_limits *perf_limits;
-
-	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
-		perf_limits = &performance_limits;
-	else
-		perf_limits = &powersave_limits;
 
 	update_turbo_state();
-	policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
-			perf_limits->no_turbo ?
+	policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
 			cpu->pstate.max_freq :
 			cpu->pstate.turbo_freq;
 
@@ -2201,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 		unsigned int max_freq, min_freq;
 
 		max_freq = policy->cpuinfo.max_freq *
-					perf_limits->max_sysfs_pct / 100;
+					global.max_sysfs_pct / 100;
 		min_freq = policy->cpuinfo.max_freq *
-					perf_limits->min_sysfs_pct / 100;
+					global.min_sysfs_pct / 100;
 		cpufreq_verify_within_limits(policy, min_freq, max_freq);
 	}
 
@@ -2255,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
 	/* cpuinfo and default policy values */
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	update_turbo_state();
-	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+	policy->cpuinfo.max_freq = global.turbo_disabled ?
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
 
@@ -2275,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 		return ret;
 
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
+	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
 		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
 	else
 		policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -2301,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 
 	update_turbo_state();
-	policy->cpuinfo.max_freq = limits->turbo_disabled ?
+	policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
 			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
 
 	cpufreq_verify_within_cpu_limits(policy);
@@ -2309,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
 	return 0;
 }
 
-static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
-					       struct cpufreq_policy *policy,
-					       unsigned int target_freq)
-{
-	unsigned int max_freq;
-
-	update_turbo_state();
-
-	max_freq = limits->no_turbo || limits->turbo_disabled ?
-			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
-	policy->cpuinfo.max_freq = max_freq;
-	if (policy->max > max_freq)
-		policy->max = max_freq;
-
-	if (target_freq > max_freq)
-		target_freq = max_freq;
-
-	return target_freq;
-}
-
 static int intel_cpufreq_target(struct cpufreq_policy *policy,
 				unsigned int target_freq,
 				unsigned int relation)
@@ -2337,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
 	struct cpufreq_freqs freqs;
 	int target_pstate;
 
+	update_turbo_state();
+
 	freqs.old = policy->cur;
-	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+	freqs.new = target_freq;
 
 	cpufreq_freq_transition_begin(policy, &freqs);
 	switch (relation) {
@@ -2370,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	struct cpudata *cpu = all_cpu_data[policy->cpu];
 	int target_pstate;
 
-	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
+	update_turbo_state();
+
 	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
 	intel_pstate_update_pstate(cpu, target_pstate);
@@ -2425,13 +2364,7 @@ static int intel_pstate_register_driver(void)
 {
 	int ret;
 
-	intel_pstate_init_limits(&powersave_limits);
-	intel_pstate_set_performance_limits(&performance_limits);
-	if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
-	    intel_pstate_driver == &intel_pstate)
-		limits = &performance_limits;
-	else
-		limits = &powersave_limits;
+	intel_pstate_init_limits(&global);
 
 	ret = cpufreq_register_driver(intel_pstate_driver);
 	if (ret) {
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index c5adc8c9ac43..ae948b1da93a 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
 	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
 	int error;
 
+	/*
+	 * Return if cpu_device is not setup for this CPU.
+	 *
+	 * This could happen if the arch did not set up cpu_device
+	 * since this CPU is not in cpu_present mask and the
+	 * driver did not send a correct CPU mask during registration.
+	 * Without this check we would end up passing bogus
+	 * value for &cpu_dev->kobj in kobject_init_and_add()
+	 */
+	if (!cpu_dev)
+		return -ENODEV;
+
 	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
 	if (!kdev)
 		return -ENOMEM;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index cd7cd489f739..54c577578da6 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -584,20 +584,14 @@ static int sugov_start(struct cpufreq_policy *policy)
 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
+		memset(sg_cpu, 0, sizeof(*sg_cpu));
 		sg_cpu->sg_policy = sg_policy;
-		if (policy_is_shared(policy)) {
-			sg_cpu->util = 0;
-			sg_cpu->max = 0;
-			sg_cpu->flags = SCHED_CPUFREQ_RT;
-			sg_cpu->last_update = 0;
-			sg_cpu->iowait_boost = 0;
-			sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
-			cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-						     sugov_update_shared);
-		} else {
-			cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-						     sugov_update_single);
-		}
+		sg_cpu->flags = SCHED_CPUFREQ_RT;
+		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+					     policy_is_shared(policy) ?
+							sugov_update_shared :
+							sugov_update_single);
 	}
 	return 0;
 }
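The fix replaces the shared-policy-only field resets with one memset() that runs for every CPU, so the single-CPU (per-policy) path no longer inherits stale iowait_boost state from a previous governor activation, and any field added to struct sugov_cpu later is zeroed automatically. A trimmed-down sketch (the struct here is an illustrative stand-in, not the kernel's):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for struct sugov_cpu. */
struct sugov_cpu {
	unsigned long util, max;
	unsigned int flags;
	unsigned long iowait_boost, iowait_boost_max;
	unsigned long last_update;
};

static void start_cpu(struct sugov_cpu *sg_cpu, unsigned long max_freq)
{
	/* Zero the whole structure first: stale boost values, old flags
	 * and anything added to the struct later are all wiped at once. */
	memset(sg_cpu, 0, sizeof(*sg_cpu));
	sg_cpu->flags = 1;			/* SCHED_CPUFREQ_RT stand-in */
	sg_cpu->iowait_boost_max = max_freq;
}

int main(void)
{
	struct sugov_cpu cpu = { .iowait_boost = 123456 };	/* stale */

	start_cpu(&cpu, 3400000);
	printf("iowait_boost=%lu boost_max=%lu\n",
	       cpu.iowait_boost, cpu.iowait_boost_max);
	return 0;
}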