Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
 drivers/cpufreq/cpufreq_ondemand.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 87299924e735..52cf1f021825 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -239,6 +239,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
 			this_dbs_info->prev_cpu_wall);
 	this_dbs_info->prev_cpu_wall = cur_jiffies;
+	if (!total_ticks)
+		return;
 	/*
 	 * Every sampling_rate, we check, if current idle time is less
 	 * than 20% (default), then we try to increase frequency
@@ -304,7 +306,12 @@ static void do_dbs_timer(void *data)
 	unsigned int cpu = smp_processor_id();
 	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
 
+	if (!dbs_info->enable)
+		return;
+
+	lock_cpu_hotplug();
 	dbs_check_cpu(dbs_info);
+	unlock_cpu_hotplug();
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }
@@ -319,11 +326,11 @@ static inline void dbs_timer_init(unsigned int cpu)
 	return;
 }
 
-static inline void dbs_timer_exit(unsigned int cpu)
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-
-	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
+	dbs_info->enable = 0;
+	cancel_delayed_work(&dbs_info->work);
+	flush_workqueue(kondemand_wq);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -396,8 +403,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
-		dbs_timer_exit(policy->cpu);
-		this_dbs_info->enable = 0;
+		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 		if (dbs_enable == 0)
@@ -408,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -419,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 						policy->min,
 						CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
-		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
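
Note on the teardown pattern above (a minimal sketch, not part of the patch): the reworked dbs_timer_exit() stops a self-rearming delayed work item by clearing the enable flag first, so do_dbs_timer() bails out instead of re-queueing itself, then cancels any pending instance and flushes the workqueue to wait for one that is already running. The sketch below restates that sequence with hypothetical names (example_info, example_wq, example_timer_exit); only cancel_delayed_work(), flush_workqueue() and the delayed_work/workqueue_struct types are real kernel APIs, and the exact signatures shown are those of current kernels rather than the tree this patch targeted.

#include <linux/workqueue.h>

struct example_info {
	int enable;			/* cleared before teardown */
	struct delayed_work work;	/* handler re-queues itself while enable != 0 */
};

static struct workqueue_struct *example_wq;

static void example_timer_exit(struct example_info *info)
{
	info->enable = 0;			/* handler sees this and stops re-arming */
	cancel_delayed_work(&info->work);	/* drop an instance that has not run yet */
	flush_workqueue(example_wq);		/* wait for an instance already executing */
}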