path: root/drivers/cpufreq
author		Linus Torvalds <torvalds@macmini.osdl.org>	2006-07-23 15:05:00 -0400
committer	Linus Torvalds <torvalds@macmini.osdl.org>	2006-07-23 15:05:00 -0400
commit		2cd7cbdf4bd0d0fe58e4dc903e8b413412595504 (patch)
tree		ad368b1f9a26d06be6e8bd26d821f17670ad2ef4 /drivers/cpufreq
parent		12157a8d78af50842774bedb80b7b84a87f60951 (diff)
[cpufreq] ondemand: make shutdown sequence more robust
Shutting down the ondemand policy was fraught with potential problems, causing issues for SMP suspend (which wants to hot-unplug all but the last CPU). This should fix at least the worst of them: a divide-by-zero and an infinite wait for the workqueue to shut down.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
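The divide-by-zero comes from the load calculation: dbs_check_cpu() divides by the number of wall ticks elapsed since the previous sample. A rough sketch of the relevant path (kernel-style C; the final load expression is paraphrased from the governor of this era rather than quoted from the file, and idle_ticks stands in for the idle accounting done between these lines):

	/* Elapsed wall ticks since the last sample; right after a suspend
	 * or CPU hot-unplug/replug this can legitimately be zero. */
	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
			this_dbs_info->prev_cpu_wall);
	this_dbs_info->prev_cpu_wall = cur_jiffies;
	if (!total_ticks)	/* the new guard: skip this sample entirely */
		return;
	/* ... idle_ticks is computed from per-CPU idle time here ... */
	load = (100 * (total_ticks - idle_ticks)) / total_ticks;	/* would fault if total_ticks == 0 */

The infinite wait is the other half: do_dbs_timer() re-queues itself every sampling period, so unless it checks ->enable and stops re-arming once shutdown has begun, waiting for the workqueue to drain can block forever.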
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	16
1 file changed, 10 insertions, 6 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 87299924e735..178f0c547eb7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -239,6 +239,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
 			this_dbs_info->prev_cpu_wall);
 	this_dbs_info->prev_cpu_wall = cur_jiffies;
+	if (!total_ticks)
+		return;
 	/*
 	 * Every sampling_rate, we check, if current idle time is less
 	 * than 20% (default), then we try to increase frequency
@@ -304,6 +306,9 @@ static void do_dbs_timer(void *data)
 	unsigned int cpu = smp_processor_id();
 	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
 
+	if (!dbs_info->enable)
+		return;
+
 	dbs_check_cpu(dbs_info);
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
@@ -319,11 +324,11 @@ static inline void dbs_timer_init(unsigned int cpu)
 	return;
 }
 
-static inline void dbs_timer_exit(unsigned int cpu)
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-
-	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
+	dbs_info->enable = 0;
+	cancel_delayed_work(&dbs_info->work);
+	flush_workqueue(kondemand_wq);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -396,8 +401,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
-		dbs_timer_exit(policy->cpu);
-		this_dbs_info->enable = 0;
+		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 		if (dbs_enable == 0)
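Put together, the new teardown ordering (condensed from the hunks above, with comments added here for clarity; kernel-style C, not a standalone excerpt):

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;			/* stop do_dbs_timer() from re-arming itself */
	cancel_delayed_work(&dbs_info->work);	/* drop an instance that is queued but not yet running */
	flush_workqueue(kondemand_wq);		/* wait out an instance that is already executing */
}

Passing the cpu_dbs_info_s pointer instead of a CPU number also lets CPUFREQ_GOV_STOP hand over this_dbs_info directly and drop its separate enable = 0, so the flag is always cleared before the pending work is cancelled and the queue flushed.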