Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
 drivers/cpufreq/cpufreq_ondemand.c | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..246ae147df74 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -78,15 +78,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop. It also
+ * serializes governor limit_change with do_dbs_timer. We do not want
+ * do_dbs_timer to run when user is changing the governor or limits.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -494,11 +489,10 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
+	mutex_lock(&dbs_mutex);
 
 	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
+		mutex_unlock(&dbs_mutex);
 		return;
 	}
 
@@ -517,7 +511,7 @@ static void do_dbs_timer(struct work_struct *work)
 			dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -598,14 +592,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				max(min_sampling_rate,
 				    latency * LATENCY_MULTIPLIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
+
+		dbs_timer_init(this_dbs_info);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);
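
For context, the ordering this change depends on can be sketched outside the kernel: do_dbs_timer() now takes dbs_mutex for its whole body, so the GOV_STOP path must stop the timer (dbs_timer_exit(), which ends in cancel_delayed_work_sync()) before it takes dbs_mutex itself; otherwise the stop path would hold the mutex while waiting for a worker that is blocked on that same mutex. The following is only a minimal userspace analogue using pthreads, not code from the patch; the names worker, governor_stop and stop_requested are illustrative and do not appear in the kernel source.

/*
 * Minimal userspace analogue (pthreads) of the lock ordering this patch
 * adopts.  All identifiers below are illustrative only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool stop_requested;		/* read by the worker under dbs_mutex */

/* Stands in for do_dbs_timer(): the whole body runs under dbs_mutex. */
static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&dbs_mutex);
		if (stop_requested) {
			pthread_mutex_unlock(&dbs_mutex);
			return NULL;
		}
		/* ... sample load and adjust frequency here ... */
		pthread_mutex_unlock(&dbs_mutex);
		usleep(10 * 1000);	/* stands in for the delayed requeue */
	}
}

/*
 * Stands in for the CPUFREQ_GOV_STOP path: stop the worker *before*
 * taking dbs_mutex for teardown, mirroring dbs_timer_exit() being moved
 * ahead of mutex_lock(&dbs_mutex).  Joining while holding dbs_mutex
 * could deadlock, because the worker itself blocks on dbs_mutex.
 */
static void governor_stop(pthread_t tid)
{
	pthread_mutex_lock(&dbs_mutex);
	stop_requested = true;		/* like clearing dbs_info->enable */
	pthread_mutex_unlock(&dbs_mutex);

	pthread_join(tid, NULL);	/* like cancel_delayed_work_sync() */

	pthread_mutex_lock(&dbs_mutex);
	/* ... remove the sysfs group, decrement dbs_enable ... */
	pthread_mutex_unlock(&dbs_mutex);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	usleep(50 * 1000);
	governor_stop(tid);
	puts("stopped cleanly");
	return 0;
}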