 drivers/cpufreq/cpufreq.c | 51 ++++++++++++++++++++++-----------------------------
 1 file changed, 22 insertions(+), 29 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index dddbefb857c9..571723b338f9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1041,13 +1041,13 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 				CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
-			     bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int j, cpu = dev->id;
 	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
+	bool recover_policy = cpufreq_suspended;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct cpufreq_policy *tpolicy;
 #endif
@@ -1088,9 +1088,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * Restore the saved policy when doing light-weight init and fall back
 	 * to the full init if that fails.
 	 */
-	policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
 	if (!policy) {
-		frozen = false;
+		recover_policy = false;
 		policy = cpufreq_policy_alloc();
 		if (!policy)
 			goto nomem_out;
@@ -1102,7 +1102,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 * the creation of a brand new one. So we need to perform this update
 	 * by invoking update_policy_cpu().
 	 */
-	if (frozen && cpu != policy->cpu)
+	if (recover_policy && cpu != policy->cpu)
 		update_policy_cpu(policy, cpu);
 	else
 		policy->cpu = cpu;
@@ -1130,7 +1130,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	 */
 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
 	}
@@ -1192,7 +1192,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		ret = cpufreq_add_dev_interface(policy, dev);
 		if (ret)
 			goto err_out_unregister;
@@ -1206,7 +1206,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
 	cpufreq_init_policy(policy);
 
-	if (!frozen) {
+	if (!recover_policy) {
 		policy->user_policy.policy = policy->policy;
 		policy->user_policy.governor = policy->governor;
 	}
@@ -1229,7 +1229,7 @@ err_get_freq:
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-	if (frozen) {
+	if (recover_policy) {
 		/* Do not leave stale fallback data behind. */
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
 		cpufreq_policy_put_kobj(policy);
@@ -1253,7 +1253,7 @@ nomem_out:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	return __cpufreq_add_dev(dev, sif, false);
+	return __cpufreq_add_dev(dev, sif);
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1284,8 +1284,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 }
 
 static int __cpufreq_remove_dev_prepare(struct device *dev,
-					struct subsys_interface *sif,
-					bool frozen)
+					struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int new_cpu, ret;
@@ -1299,7 +1298,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	/* Save the policy somewhere when doing a light-weight tear-down */
-	if (frozen)
+	if (cpufreq_suspended)
 		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
 
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1332,7 +1331,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	if (new_cpu >= 0) {
 		update_policy_cpu(policy, new_cpu);
 
-		if (!frozen) {
+		if (!cpufreq_suspended) {
 			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
 				 __func__, new_cpu, cpu);
 		}
@@ -1343,8 +1342,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 }
 
 static int __cpufreq_remove_dev_finish(struct device *dev,
-				       struct subsys_interface *sif,
-				       bool frozen)
+				       struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
 	int ret;
@@ -1379,7 +1377,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 			}
 		}
 
-		if (!frozen)
+		if (!cpufreq_suspended)
 			cpufreq_policy_put_kobj(policy);
 
 		/*
@@ -1395,7 +1393,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 		list_del(&policy->policy_list);
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-		if (!frozen)
+		if (!cpufreq_suspended)
 			cpufreq_policy_free(policy);
 	} else {
 		if (has_target()) {
@@ -1425,10 +1423,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+	ret = __cpufreq_remove_dev_prepare(dev, sif);
 
 	if (!ret)
-		ret = __cpufreq_remove_dev_finish(dev, sif, false);
+		ret = __cpufreq_remove_dev_finish(dev, sif);
 
 	return ret;
 }
@@ -2182,29 +2180,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
-	bool frozen = false;
 
 	dev = get_cpu_device(cpu);
 	if (dev) {
-
-		if (action & CPU_TASKS_FROZEN)
-			frozen = true;
-
 		switch (action & ~CPU_TASKS_FROZEN) {
 		case CPU_ONLINE:
-			__cpufreq_add_dev(dev, NULL, frozen);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 
 		case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev, NULL, frozen);
+			__cpufreq_remove_dev_prepare(dev, NULL);
 			break;
 
 		case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev, NULL, frozen);
+			__cpufreq_remove_dev_finish(dev, NULL);
 			break;
 
 		case CPU_DOWN_FAILED:
-			__cpufreq_add_dev(dev, NULL, frozen);
+			__cpufreq_add_dev(dev, NULL);
 			break;
 		}
 	}
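
The hunks above only read the global cpufreq_suspended flag (through the local recover_policy alias in __cpufreq_add_dev() and direct checks in the remove paths); the code that sets the flag is not part of this diff. A minimal sketch of how such a flag is typically driven from the system suspend/resume path follows; the helper names cpufreq_suspend()/cpufreq_resume() and their call sites are assumptions here, not something shown in this commit.

/*
 * Hypothetical sketch, not part of this diff: the flag consulted by
 * __cpufreq_add_dev() and the __cpufreq_remove_dev_*() paths would be
 * flipped around system suspend, so CPU hotplug performed while
 * suspended takes the light-weight, policy-preserving paths above.
 */
static bool cpufreq_suspended;

void cpufreq_suspend(void)
{
	/* Entering suspend: later CPU offlining keeps fallback policy data. */
	cpufreq_suspended = true;
}

void cpufreq_resume(void)
{
	/* Leaving suspend: CPU onlining restores the saved policies. */
	cpufreq_suspended = false;
}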