| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-08-04 18:28:46 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-08-04 18:28:46 -0400 |
| commit | 3f5760b90eb3bacfaa4d4c3e584152468ed327ca | |
| tree | 5d142091d4bec3aee9650ffe564d75bb348fd6ee | |
| parent | 624720e09c9b7913ef4bc6989878a6fcb7ecdff8 | |
| parent | 4bc5d34135039566b8d6efa2de7515b2be505da8 | |
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Make cpufreq suspend code conditional on powerpc.
[CPUFREQ] Fix a kobject reference bug related to managed CPUs
[CPUFREQ] Do not set policy for offline cpus
[CPUFREQ] Fix NULL pointer dereference regression in conservative governor
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/cpufreq/cpufreq.c | 27 |
| -rw-r--r-- | drivers/cpufreq/cpufreq_conservative.c | 6 |

2 files changed, 30 insertions, 3 deletions
```diff
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b90eda8b3440..fd69086d08d5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -858,6 +858,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 		/* Check for existing affected CPUs.
 		 * They may not be aware of it due to CPU Hotplug.
+		 * cpufreq_cpu_put is called when the device is removed
+		 * in __cpufreq_remove_dev()
 		 */
 		managed_policy = cpufreq_cpu_get(j);
 		if (unlikely(managed_policy)) {
@@ -884,7 +886,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			ret = sysfs_create_link(&sys_dev->kobj,
 						&managed_policy->kobj,
 						"cpufreq");
-			if (!ret)
+			if (ret)
 				cpufreq_cpu_put(managed_policy);
 			/*
 			 * Success. We only needed to be added to the mask.
@@ -924,6 +926,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
+		if (!cpu_online(j))
+			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
@@ -1244,13 +1248,22 @@ EXPORT_SYMBOL(cpufreq_get);
 
 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
-	int cpu = sysdev->id;
 	int ret = 0;
+
+#ifdef __powerpc__
+	int cpu = sysdev->id;
 	unsigned int cur_freq = 0;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("suspending cpu %u\n", cpu);
 
+	/*
+	 * This whole bogosity is here because Powerbooks are made of fail.
+	 * No sane platform should need any of the code below to be run.
+	 * (it's entirely the wrong thing to do, as driver->get may
+	 * reenable interrupts on some architectures).
+	 */
+
 	if (!cpu_online(cpu))
 		return 0;
 
@@ -1309,6 +1322,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 
 out:
 	cpufreq_cpu_put(cpu_policy);
+#endif /* __powerpc__ */
 	return ret;
 }
 
@@ -1322,12 +1336,18 @@ out:
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
-	int cpu = sysdev->id;
 	int ret = 0;
+
+#ifdef __powerpc__
+	int cpu = sysdev->id;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("resuming cpu %u\n", cpu);
 
+	/* As with the ->suspend method, all the code below is
+	 * only necessary because Powerbooks suck.
+	 * See commit 42d4dc3f4e1e for jokes. */
+
 	if (!cpu_online(cpu))
 		return 0;
 
@@ -1391,6 +1411,7 @@ out:
 	schedule_work(&cpu_policy->update);
 fail:
 	cpufreq_cpu_put(cpu_policy);
+#endif /* __powerpc__ */
 	return ret;
 }
 
```
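The `if (!ret)` to `if (ret)` change above is the kobject reference fix: `cpufreq_cpu_get(j)` takes a reference on the managed policy, and per the new comment that reference must now be held for as long as the managed CPU's sysfs link exists (it is dropped later in `__cpufreq_remove_dev()`); it is released immediately only when `sysfs_create_link()` fails. Below is a minimal user-space sketch of that refcount discipline, not the kernel API: the `policy_get()`/`policy_put()`/`create_link()` helpers are hypothetical stand-ins.

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object, standing in for struct cpufreq_policy. */
struct policy {
	int refcount;
};

static struct policy *policy_get(struct policy *p)
{
	p->refcount++;		/* like cpufreq_cpu_get(): take a reference */
	return p;
}

static void policy_put(struct policy *p)
{
	if (--p->refcount == 0)
		free(p);	/* last reference gone: release the object */
}

/* Stand-in for sysfs_create_link(): 0 on success, negative on error. */
static int create_link(int should_fail)
{
	return should_fail ? -1 : 0;
}

/* The discipline the fix restores: keep the reference while the link
 * exists, drop it only on the error path. The link's owner drops the
 * successful reference at removal time (__cpufreq_remove_dev() in the
 * kernel). */
static int add_managed_cpu(struct policy *managed, int should_fail)
{
	int ret;

	policy_get(managed);
	ret = create_link(should_fail);
	if (ret)
		policy_put(managed);	/* failure: give the reference back */
	return ret;			/* success: reference held until removal */
}

int main(void)
{
	struct policy *p = calloc(1, sizeof(*p));

	p->refcount = 1;			/* creator's reference */
	if (add_managed_cpu(p, 0) == 0) {
		printf("link created, refcount=%d\n", p->refcount);	/* prints 2 */
		policy_put(p);			/* what removal would do */
	}
	policy_put(p);				/* drop creator's reference */
	return 0;
}
```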
```diff
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 57490502b21c..bdea7e2f94ba 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -63,6 +63,7 @@ struct cpu_dbs_info_s {
 	unsigned int down_skip;
 	unsigned int requested_freq;
 	int cpu;
+	unsigned int enable:1;
 	/*
 	 * percpu mutex that serializes governor limit change with
 	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
@@ -141,6 +142,9 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 
 	struct cpufreq_policy *policy;
 
+	if (!this_dbs_info->enable)
+		return 0;
+
 	policy = this_dbs_info->cur_policy;
 
 	/*
@@ -497,6 +501,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
+	dbs_info->enable = 1;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
 			delay);
@@ -504,6 +509,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
+	dbs_info->enable = 0;
 	cancel_delayed_work_sync(&dbs_info->work);
 }
 
```
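The `enable` bit added above closes the conservative governor's NULL dereference: the cpufreq transition notifier stays registered even when the governor is not running on a CPU, so `dbs_cpufreq_notifier()` could fire against a `cpu_dbs_info_s` whose `cur_policy` was not (or no longer) valid. Setting the flag in `dbs_timer_init()` and clearing it in `dbs_timer_exit()` turns the notifier into a no-op outside that window. Below is a small user-space sketch of the same guard pattern, with hypothetical names rather than the governor code.

```c
#include <stdio.h>

/* Stand-in for struct cpu_dbs_info_s: callback context that can be torn
 * down while the callback source remains registered. */
struct dbs_info {
	unsigned int enable:1;
	const char *cur_policy;	/* NULL unless the governor is running */
};

/* Stand-in for dbs_cpufreq_notifier(): must not touch cur_policy unless
 * the context is enabled. */
static int notifier(struct dbs_info *info)
{
	if (!info->enable)
		return 0;	/* governor not running: ignore the event */

	printf("adjusting policy %s\n", info->cur_policy);
	return 0;
}

static void timer_init(struct dbs_info *info, const char *policy)
{
	info->cur_policy = policy;
	info->enable = 1;	/* set before events may be acted on */
}

static void timer_exit(struct dbs_info *info)
{
	info->enable = 0;	/* clear before tearing the rest down */
	info->cur_policy = NULL;
}

int main(void)
{
	struct dbs_info info = { 0 };

	notifier(&info);		/* early event: safely ignored */
	timer_init(&info, "cpu0");
	notifier(&info);		/* normal operation */
	timer_exit(&info);
	notifier(&info);		/* late event after stop: safely ignored */
	return 0;
}
```

The ordering mirrors the patch: the flag is cleared before the delayed work is cancelled, so a notifier that races with teardown sees `enable == 0` and bails out instead of dereferencing a stale policy pointer.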
