Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r--	drivers/cpufreq/cpufreq.c | 415
1 file changed, 209 insertions(+), 206 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 199b52b7c3e1..2677ff1c0a2c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,7 +26,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/syscore_ops.h>
+#include <linux/suspend.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
 
@@ -42,10 +42,11 @@ static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);
 static LIST_HEAD(cpufreq_policy_list);
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-#endif
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
 
 static inline bool has_target(void)
 {
@@ -181,8 +182,8 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	if (!policy || IS_ERR(policy->clk)) {
-		pr_err("%s: No %s associated to cpu: %d\n", __func__,
-			policy ? "clk" : "policy", cpu);
+		pr_err("%s: No %s associated to cpu: %d\n",
+		       __func__, policy ? "clk" : "policy", cpu);
 		return 0;
 	}
 
@@ -190,6 +191,12 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+	return per_cpu(cpufreq_cpu_data, cpu);
+}
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = NULL;
@@ -254,15 +261,15 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 	if (!l_p_j_ref_freq) {
 		l_p_j_ref = loops_per_jiffy;
 		l_p_j_ref_freq = ci->old;
-		pr_debug("saving %lu as reference value for loops_per_jiffy; "
-			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+			 l_p_j_ref, l_p_j_ref_freq);
 	}
 	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
 	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 								ci->new);
-		pr_debug("scaling loops_per_jiffy to %lu "
-			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
+		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+			 loops_per_jiffy, ci->new);
 	}
 }
 #else
@@ -282,7 +289,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
 	freqs->flags = cpufreq_driver->flags;
 	pr_debug("notification %u of frequency transition to %u kHz\n",
-			state, freqs->new);
+		 state, freqs->new);
 
 	switch (state) {
 
@@ -294,9 +301,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 			if ((policy) && (policy->cpu == freqs->cpu) &&
 			    (policy->cur) && (policy->cur != freqs->old)) {
-				pr_debug("Warning: CPU frequency is"
-					" %u, cpufreq assumed %u kHz.\n",
-					freqs->old, policy->cur);
+				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+					 freqs->old, policy->cur);
 				freqs->old = policy->cur;
 			}
 		}
@@ -307,8 +313,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
 	case CPUFREQ_POSTCHANGE:
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
-			(unsigned long)freqs->cpu);
+		pr_debug("FREQ: %lu - CPU: %lu\n",
+			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
 		trace_cpu_frequency(freqs->new, freqs->cpu);
 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 				CPUFREQ_POSTCHANGE, freqs);
@@ -352,7 +358,7 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
-ssize_t show_boost(struct kobject *kobj,
+static ssize_t show_boost(struct kobject *kobj,
 				 struct attribute *attr, char *buf)
 {
 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
@@ -368,13 +374,13 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
 		return -EINVAL;
 
 	if (cpufreq_boost_trigger_state(enable)) {
-		pr_err("%s: Cannot %s BOOST!\n", __func__,
-			enable ? "enable" : "disable");
+		pr_err("%s: Cannot %s BOOST!\n",
+		       __func__, enable ? "enable" : "disable");
 		return -EINVAL;
 	}
 
-	pr_debug("%s: cpufreq BOOST %s\n", __func__,
-		enable ? "enabled" : "disabled");
+	pr_debug("%s: cpufreq BOOST %s\n",
+		 __func__, enable ? "enabled" : "disabled");
 
 	return count;
 }
@@ -879,18 +885,25 @@ err_out_kobj_put:
 
 static void cpufreq_init_policy(struct cpufreq_policy *policy)
 {
+	struct cpufreq_governor *gov = NULL;
 	struct cpufreq_policy new_policy;
 	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
 
+	/* Update governor of new_policy to the governor used before hotplug */
+	gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+	if (gov)
+		pr_debug("Restoring governor %s for cpu %d\n",
+				policy->governor->name, policy->cpu);
+	else
+		gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+	new_policy.governor = gov;
+
 	/* Use the default policy if its valid. */
 	if (cpufreq_driver->setpolicy)
-		cpufreq_parse_governor(policy->governor->name,
-					&new_policy.policy, NULL);
-
-	/* assure that the starting sequence is run in cpufreq_set_policy */
-	policy->governor = NULL;
+		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
 	/* set default policy */
 	ret = cpufreq_set_policy(policy, &new_policy);
@@ -949,6 +962,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+	policy->governor = NULL;
+
 	return policy;
 }
 
@@ -1022,21 +1037,19 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
 	up_write(&policy->rwsem);
 
-	cpufreq_frequency_table_update_policy_cpu(policy);
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
-			     bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int j, cpu = dev->id;
 	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
+	bool recover_policy = cpufreq_suspended;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct cpufreq_policy *tpolicy;
-	struct cpufreq_governor *gov;
 #endif
 
 	if (cpu_is_offline(cpu))
@@ -1075,9 +1088,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * Restore the saved policy when doing light-weight init and fall back
 	 * to the full init if that fails.
 	 */
-	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
 	if (!policy) {
-		frozen = false;
+		recover_policy = false;
 		policy = cpufreq_policy_alloc();
 		if (!policy)
 			goto nomem_out;
@@ -1089,12 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * the creation of a brand new one. So we need to perform this update
 	 * by invoking update_policy_cpu().
 	 */
-	if (frozen && cpu != policy->cpu)
+	if (recover_policy && cpu != policy->cpu)
 		update_policy_cpu(policy, cpu);
 	else
 		policy->cpu = cpu;
 
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
 	init_completion(&policy->kobj_unregister);
@@ -1118,7 +1130,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 */
 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
 	}
@@ -1180,16 +1192,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
-	if (gov) {
-		policy->governor = gov;
-		pr_debug("Restoring governor %s for cpu %d\n",
-			policy->governor->name, cpu);
-	}
-#endif
-
-	if (!frozen) {
+	if (!recover_policy) {
 		ret = cpufreq_add_dev_interface(policy, dev);
 		if (ret)
 			goto err_out_unregister;
@@ -1203,7 +1206,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
 	cpufreq_init_policy(policy);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.policy = policy->policy;
 		policy->user_policy.governor = policy->governor;
 	}
@@ -1226,7 +1229,7 @@ err_get_freq:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-	if (frozen) {
+	if (recover_policy) {
 		/* Do not leave stale fallback data behind. */
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
 		cpufreq_policy_put_kobj(policy);
@@ -1250,7 +1253,7 @@ nomem_out:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	return __cpufreq_add_dev(dev, sif, false);
+	return __cpufreq_add_dev(dev, sif);
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1265,7 +1268,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
 	if (ret) {
-		pr_err("%s: Failed to move kobj: %d", __func__, ret);
+		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
 
 		down_write(&policy->rwsem);
 		cpumask_set_cpu(old_cpu, policy->cpus);
@@ -1281,8 +1284,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 }
 
 static int __cpufreq_remove_dev_prepare(struct device *dev,
-					struct subsys_interface *sif,
-					bool frozen)
+					struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int new_cpu, ret;
@@ -1296,7 +1298,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	/* Save the policy somewhere when doing a light-weight tear-down */
-	if (frozen)
+	if (cpufreq_suspended)
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
 
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1314,11 +1316,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		}
 	}
 
-#ifdef CONFIG_HOTPLUG_CPU
 	if (!cpufreq_driver->setpolicy)
 		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
 			policy->governor->name, CPUFREQ_NAME_LEN);
-#endif
 
 	down_read(&policy->rwsem);
 	cpus = cpumask_weight(policy->cpus);
@@ -1331,9 +1331,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 		if (new_cpu >= 0) {
 			update_policy_cpu(policy, new_cpu);
 
-			if (!frozen) {
+			if (!cpufreq_suspended) {
 				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
 					 __func__, new_cpu, cpu);
 			}
 		}
 	}
@@ -1342,8 +1342,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 }
 
 static int __cpufreq_remove_dev_finish(struct device *dev,
-				       struct subsys_interface *sif,
-				       bool frozen)
+				       struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int ret;
@@ -1373,12 +1372,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 						CPUFREQ_GOV_POLICY_EXIT);
 		if (ret) {
 			pr_err("%s: Failed to exit governor\n",
-					__func__);
+			       __func__);
 			return ret;
 		}
 	}
 
-	if (!frozen)
+	if (!cpufreq_suspended)
 		cpufreq_policy_put_kobj(policy);
 
 	/*
@@ -1394,14 +1393,14 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		list_del(&policy->policy_list);
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-		if (!frozen)
+		if (!cpufreq_suspended)
 			cpufreq_policy_free(policy);
 	} else {
 		if (has_target()) {
 			if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
 					(ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
 				pr_err("%s: Failed to start governor\n",
-						__func__);
+				       __func__);
 				return ret;
 			}
 		}
@@ -1424,10 +1423,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+	ret = __cpufreq_remove_dev_prepare(dev, sif);
 
 	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif, false);
+		ret = __cpufreq_remove_dev_finish(dev, sif);
 
 	return ret;
 }
@@ -1458,8 +1457,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 	struct cpufreq_freqs freqs;
 	unsigned long flags;
 
-	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
-		"core thinks of %u, is %u kHz.\n", old_freq, new_freq);
+	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+		 old_freq, new_freq);
 
 	freqs.old = old_freq;
 	freqs.new = new_freq;
@@ -1570,83 +1569,104 @@ static struct subsys_interface cpufreq_interface = {
 	.remove_dev = cpufreq_remove_dev,
 };
 
+/*
+ * In case platform wants some specific frequency to be configured
+ * during suspend..
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	if (!policy->suspend_freq) {
+		pr_err("%s: suspend_freq can't be zero\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+			policy->suspend_freq);
+
+	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+			CPUFREQ_RELATION_H);
+	if (ret)
+		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+				__func__, policy->suspend_freq, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
+
 /**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
  *
- * This function is only executed for the boot processor.  The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system wide Suspend/Hibernate cycles for suspending governors
+ * as some platforms can't change frequency after this point in suspend cycle.
+ * Because some of the devices (like: i2c, regulators, etc) they use for
+ * changing frequency are suspended quickly after this point.
  */
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
 	struct cpufreq_policy *policy;
 
-	pr_debug("suspending cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
-		return 0;
+	if (!has_target())
+		return;
 
-	if (cpufreq_driver->suspend) {
-		ret = cpufreq_driver->suspend(policy);
-		if (ret)
-			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-					"step on CPU %u\n", policy->cpu);
+	pr_debug("%s: Suspending Governors\n", __func__);
+
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+			pr_err("%s: Failed to stop governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->suspend
+		    && cpufreq_driver->suspend(policy))
+			pr_err("%s: Failed to suspend driver: %p\n", __func__,
+				policy);
 	}
 
-	cpufreq_cpu_put(policy);
-	return ret;
+	cpufreq_suspended = true;
 }
 
 /**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
+ * cpufreq_resume() - Resume CPUFreq governors
  *
- *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored. It will verify that the current freq is in sync with
- *	    what we believe it to be. This is a bit later than when it
- *	    should be, but nonethteless it's better than calling
- *	    cpufreq_driver->get() here which might re-enable interrupts...
- *
- * This function is only executed for the boot CPU.  The other CPUs have not
- * been turned on yet.
+ * Called during system wide Suspend/Hibernate cycle for resuming governors that
+ * are suspended with cpufreq_suspend().
  */
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
 {
-	int ret = 0;
-
-	int cpu = smp_processor_id();
 	struct cpufreq_policy *policy;
 
-	pr_debug("resuming cpu %u\n", cpu);
+	if (!cpufreq_driver)
+		return;
 
-	/* If there's no policy for the boot CPU, we have nothing to do. */
-	policy = cpufreq_cpu_get(cpu);
-	if (!policy)
+	if (!has_target())
 		return;
 
-	if (cpufreq_driver->resume) {
-		ret = cpufreq_driver->resume(policy);
-		if (ret) {
-			printk(KERN_ERR "cpufreq: resume failed in ->resume "
-					"step on CPU %u\n", policy->cpu);
-			goto fail;
-		}
-	}
+	pr_debug("%s: Resuming Governors\n", __func__);
 
-	schedule_work(&policy->update);
+	cpufreq_suspended = false;
 
-fail:
-	cpufreq_cpu_put(policy);
-}
+	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+			pr_err("%s: Failed to start governor for policy: %p\n",
+				__func__, policy);
+		else if (cpufreq_driver->resume
+		    && cpufreq_driver->resume(policy))
+			pr_err("%s: Failed to resume driver: %p\n", __func__,
+				policy);
 
-static struct syscore_ops cpufreq_syscore_ops = {
-	.suspend = cpufreq_bp_suspend,
-	.resume = cpufreq_bp_resume,
-};
+		/*
+		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
+		 * policy in list. It will verify that the current freq is in
+		 * sync with what we believe it to be.
+		 */
+		if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+			schedule_work(&policy->update);
+	}
+}
 
 /**
  * cpufreq_get_current_driver - return current driver's name
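
The cpufreq_generic_suspend() helper added above is exported so that a platform driver which must park its CPUs at a known-safe rate across suspend can opt in by filling in policy->suspend_freq (a policy field introduced alongside this patch in the cpufreq header) and pointing its ->suspend hook at the helper. A minimal sketch of such a driver follows; the driver name, the frequency value, and the stubbed-out callbacks are hypothetical, not part of this patch:

	#include <linux/cpufreq.h>

	/* Hypothetical platform driver, shown only to illustrate the new hook. */
	static int foo_target_index(struct cpufreq_policy *policy, unsigned int index)
	{
		/* program clocks/regulators for the requested table entry here */
		return 0;
	}

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		/* usual frequency-table and clock setup elided */
		policy->suspend_freq = 800000;	/* kHz; made-up safe frequency */
		return 0;
	}

	static struct cpufreq_driver foo_cpufreq_driver = {
		.name		= "foo-cpufreq",
		.init		= foo_cpufreq_init,
		.verify		= cpufreq_generic_frequency_table_verify,
		.target_index	= foo_target_index,
		.get		= cpufreq_generic_get,
		.suspend	= cpufreq_generic_suspend,	/* reuse the new helper */
	};

With this in place, cpufreq_suspend() above invokes the driver's ->suspend after stopping the governors, which lands in cpufreq_generic_suspend() and pins the policy to suspend_freq via __cpufreq_driver_target().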
@@ -1762,7 +1782,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 		target_freq = policy->min;
 
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
-			policy->cpu, target_freq, relation, old_target_freq);
+		 policy->cpu, target_freq, relation, old_target_freq);
 
 	/*
 	 * This might look like a redundant call as we are checking it again
@@ -1807,8 +1827,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 		freqs.flags = 0;
 
 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
-				__func__, policy->cpu, freqs.old,
-				freqs.new);
+			 __func__, policy->cpu, freqs.old, freqs.new);
 
 		cpufreq_notify_transition(policy, &freqs,
 					CPUFREQ_PRECHANGE);
@@ -1817,7 +1836,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 		retval = cpufreq_driver->target_index(policy, index);
 		if (retval)
 			pr_err("%s: Failed to change cpu frequency: %d\n",
-					__func__, retval);
+			       __func__, retval);
 
 		if (notify)
 			cpufreq_notify_post_transition(policy, &freqs, retval);
@@ -1863,17 +1882,18 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	struct cpufreq_governor *gov = NULL;
 #endif
 
+	/* Don't start any governor operations if we are entering suspend */
+	if (cpufreq_suspended)
+		return 0;
+
 	if (policy->governor->max_transition_latency &&
 	    policy->cpuinfo.transition_latency >
 	    policy->governor->max_transition_latency) {
 		if (!gov)
 			return -EINVAL;
 		else {
-			printk(KERN_WARNING "%s governor failed, too long"
-			       " transition latency of HW, fallback"
-			       " to %s governor\n",
-			       policy->governor->name,
-			       gov->name);
+			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+				policy->governor->name, gov->name);
 			policy->governor = gov;
 		}
 	}
@@ -1883,7 +1903,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 		return -EINVAL;
 
 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-						policy->cpu, event);
+		 policy->cpu, event);
 
 	mutex_lock(&cpufreq_governor_lock);
 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
@@ -1950,9 +1970,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
-#ifdef CONFIG_HOTPLUG_CPU
 	int cpu;
-#endif
 
 	if (!governor)
 		return;
@@ -1960,14 +1978,12 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (cpufreq_disabled())
 		return;
 
-#ifdef CONFIG_HOTPLUG_CPU
 	for_each_present_cpu(cpu) {
 		if (cpu_online(cpu))
 			continue;
 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
 	}
-#endif
 
 	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
@@ -2012,22 +2028,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
 				struct cpufreq_policy *new_policy)
 {
-	int ret = 0, failed = 1;
+	struct cpufreq_governor *old_gov;
+	int ret;
 
-	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
-		new_policy->min, new_policy->max);
+	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+		 new_policy->cpu, new_policy->min, new_policy->max);
 
 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-	if (new_policy->min > policy->max || new_policy->max < policy->min) {
-		ret = -EINVAL;
-		goto error_out;
-	}
+	if (new_policy->min > policy->max || new_policy->max < policy->min)
+		return -EINVAL;
 
 	/* verify the cpu speed can be set within this limit */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
-		goto error_out;
+		return ret;
 
 	/* adjust if necessary - all reasons */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2043,7 +2058,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	 */
 	ret = cpufreq_driver->verify(new_policy);
 	if (ret)
-		goto error_out;
+		return ret;
 
 	/* notification of the new policy */
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2053,63 +2068,53 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	policy->max = new_policy->max;
 
 	pr_debug("new min and max freqs are %u - %u kHz\n",
 		 policy->min, policy->max);
 
 	if (cpufreq_driver->setpolicy) {
 		policy->policy = new_policy->policy;
 		pr_debug("setting range\n");
-		ret = cpufreq_driver->setpolicy(new_policy);
-	} else {
-		if (new_policy->governor != policy->governor) {
-			/* save old, working values */
-			struct cpufreq_governor *old_gov = policy->governor;
-
-			pr_debug("governor switch\n");
-
-			/* end old governor */
-			if (policy->governor) {
-				__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-				up_write(&policy->rwsem);
-				__cpufreq_governor(policy,
-						CPUFREQ_GOV_POLICY_EXIT);
-				down_write(&policy->rwsem);
-			}
+		return cpufreq_driver->setpolicy(new_policy);
+	}
 
-			/* start new governor */
-			policy->governor = new_policy->governor;
-			if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-				if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
-					failed = 0;
-				} else {
-					up_write(&policy->rwsem);
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_POLICY_EXIT);
-					down_write(&policy->rwsem);
-				}
-			}
+	if (new_policy->governor == policy->governor)
+		goto out;
 
-			if (failed) {
-				/* new governor failed, so re-start old one */
-				pr_debug("starting governor %s failed\n",
-					 policy->governor->name);
-				if (old_gov) {
-					policy->governor = old_gov;
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_POLICY_INIT);
-					__cpufreq_governor(policy,
-							CPUFREQ_GOV_START);
-				}
-				ret = -EINVAL;
-				goto error_out;
-			}
-			/* might be a policy change, too, so fall through */
-		}
-		pr_debug("governor: change or update limits\n");
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
-	}
+	pr_debug("governor switch\n");
+
+	/* save old, working values */
+	old_gov = policy->governor;
+	/* end old governor */
+	if (old_gov) {
+		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		up_write(&policy->rwsem);
+		__cpufreq_governor(policy,CPUFREQ_GOV_POLICY_EXIT);
+		down_write(&policy->rwsem);
+	}
 
-error_out:
-	return ret;
+	/* start new governor */
+	policy->governor = new_policy->governor;
+	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+			goto out;
+
+		up_write(&policy->rwsem);
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		down_write(&policy->rwsem);
+	}
+
+	/* new governor failed, so re-start old one */
+	pr_debug("starting governor %s failed\n", policy->governor->name);
+	if (old_gov) {
+		policy->governor = old_gov;
+		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+		__cpufreq_governor(policy, CPUFREQ_GOV_START);
+	}
+
+	return -EINVAL;
+
+out:
+	pr_debug("governor: change or update limits\n");
+	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
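
The rewrite above flattens cpufreq_set_policy()'s nested else-branch into straight-line code with a single out: label, but the governor-switch protocol itself is unchanged: stop and exit the old governor, install and start the new one, and fall back to re-initing the old governor if that fails. Condensed to its happy path (rollback, locking, and error checks elided; every name comes from the hunk above):

	/* Happy path of the governor switch in cpufreq_set_policy() */
	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);		/* quiesce old governor */
	__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);	/* tear down old governor */
	policy->governor = new_policy->governor;
	__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);	/* set up new governor */
	__cpufreq_governor(policy, CPUFREQ_GOV_START);		/* start new governor */
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);		/* apply new min/max */

Note that with the cpufreq_suspended check added to __cpufreq_governor() earlier in this patch, every step of this sequence becomes a silent no-op while the system is suspending.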
@@ -2145,8 +2150,13 @@ int cpufreq_update_policy(unsigned int cpu)
 	 */
 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
 		new_policy.cur = cpufreq_driver->get(cpu);
+		if (WARN_ON(!new_policy.cur)) {
+			ret = -EIO;
+			goto no_policy;
+		}
+
 		if (!policy->cur) {
-			pr_debug("Driver did not initialize current freq");
+			pr_debug("Driver did not initialize current freq\n");
 			policy->cur = new_policy.cur;
 		} else {
 			if (policy->cur != new_policy.cur && has_target())
@@ -2170,30 +2180,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
-	bool frozen = false;
 
 	dev = get_cpu_device(cpu);
 	if (dev) {
-
-		if (action & CPU_TASKS_FROZEN)
-			frozen = true;
-
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
-			__cpufreq_add_dev(dev, NULL, frozen);
-			cpufreq_update_policy(cpu);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 
 		case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
+			__cpufreq_remove_dev_prepare(dev, NULL);
 			break;
 
 		case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev, NULL, frozen);
+			__cpufreq_remove_dev_finish(dev, NULL);
 			break;
 
 		case CPU_DOWN_FAILED:
-			__cpufreq_add_dev(dev, NULL, frozen);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}
@@ -2249,8 +2253,8 @@ int cpufreq_boost_trigger_state(int state)
 		cpufreq_driver->boost_enabled = !state;
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-		pr_err("%s: Cannot %s BOOST\n", __func__,
-			state ? "enable" : "disable");
+		pr_err("%s: Cannot %s BOOST\n",
+		       __func__, state ? "enable" : "disable");
 	}
 
 	return ret;
@@ -2322,7 +2326,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		ret = cpufreq_sysfs_create_file(&boost.attr);
 		if (ret) {
 			pr_err("%s: cannot register global BOOST sysfs file\n",
-				__func__);
+			       __func__);
 			goto err_null_driver;
 		}
 	}
@@ -2345,7 +2349,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 		/* if all ->init() calls failed, unregister */
 		if (ret) {
 			pr_debug("no CPU initialized for driver %s\n",
-				driver_data->name);
+				 driver_data->name);
 			goto err_if_unreg;
 		}
 	}
@@ -2409,7 +2413,6 @@ static int __init cpufreq_core_init(void)
 
 	cpufreq_global_kobject = kobject_create();
 	BUG_ON(!cpufreq_global_kobject);
-	register_syscore_ops(&cpufreq_syscore_ops);
 
 	return 0;
 }
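
Taken together, the patch replaces the old boot-CPU-only syscore hooks with cpufreq_suspend()/cpufreq_resume(), which walk every policy. Their call sites live outside cpufreq.c (this diffstat is limited to that one file), but the sequencing they assume is roughly the following; the wrapper function here is illustrative only, not code from the patch:

	/* Illustrative only: the real callers are added elsewhere in this series. */
	static void example_system_sleep(void)
	{
		cpufreq_suspend();	/* stop governors; __cpufreq_governor() then no-ops */

		/* ... devices suspend, non-boot CPUs go offline, the system
		 * sleeps, then everything comes back in reverse order ... */

		cpufreq_resume();	/* restart governors and resync the current freq */
	}

Because cpufreq_suspended also steers __cpufreq_remove_dev_prepare()/..._finish() into the light-weight path, CPU-hotplug teardown between these two calls parks each policy in the per-CPU fallback data, which __cpufreq_add_dev() restores on resume via recover_policy.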