Diffstat (limited to 'drivers/cpufreq')
 drivers/cpufreq/cpufreq.c              |  4 ++--
 drivers/cpufreq/cpufreq_conservative.c | 27 +++++++++++----------------
 drivers/cpufreq/cpufreq_ondemand.c     | 27 +++++++++++----------------
 3 files changed, 24 insertions(+), 34 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e2ec0b18948..c7fe16e0474b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1070,8 +1070,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	unlock_policy_rwsem_write(cpu);
-
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
@@ -1088,6 +1086,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);
 
+	unlock_policy_rwsem_write(cpu);
+
 	free_cpumask_var(data->related_cpus);
 	free_cpumask_var(data->cpus);
 	kfree(data);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b4..58889f26029a 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -70,15 +70,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop. It also
+ * serializes governor limit_change with do_dbs_timer. We do not want
+ * do_dbs_timer to run when user is changing the governor or limits.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -488,18 +483,17 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
+	mutex_lock(&dbs_mutex);
 
 	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
+		mutex_unlock(&dbs_mutex);
 		return;
 	}
 
 	dbs_check_cpu(dbs_info);
 
 	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -590,15 +584,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
 
+		dbs_timer_init(this_dbs_info);
+
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..246ae147df74 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -78,15 +78,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop. It also
+ * serializes governor limit_change with do_dbs_timer. We do not want
+ * do_dbs_timer to run when user is changing the governor or limits.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -494,11 +489,10 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (lock_policy_rwsem_write(cpu) < 0)
-		return;
+	mutex_lock(&dbs_mutex);
 
 	if (!dbs_info->enable) {
-		unlock_policy_rwsem_write(cpu);
+		mutex_unlock(&dbs_mutex);
 		return;
 	}
 
@@ -517,7 +511,7 @@ static void do_dbs_timer(struct work_struct *work)
 					dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
-	unlock_policy_rwsem_write(cpu);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -598,14 +592,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				max(min_sampling_rate,
 				    latency * LATENCY_MULTIPLIER);
 		}
-		dbs_timer_init(this_dbs_info);
-
 		mutex_unlock(&dbs_mutex);
+
+		dbs_timer_init(this_dbs_info);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		mutex_lock(&dbs_mutex);
 		dbs_timer_exit(this_dbs_info);
+
+		mutex_lock(&dbs_mutex);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);
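
Not part of the patch: a minimal userspace sketch of the teardown ordering the GOV_STOP change enforces. The old comment warned that do_dbs_timer() must not take dbs_mutex because it would deadlock with cancel_delayed_work_sync(); the patch lets the timer take dbs_mutex anyway, and avoids the deadlock by running dbs_timer_exit() before mutex_lock(&dbs_mutex) in the stop path. The sketch below models the same rule with pthreads in place of kernel workqueues, so all names and the join/sleep machinery are illustrative, not from the patch: never wait for the worker to finish while holding a lock the worker itself takes.

/* sketch.c - build with: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dbs_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool enable = true;		/* plays the role of dbs_info->enable */

/* Periodic worker, analogous to do_dbs_timer(): takes dbs_mutex each pass. */
static void *do_dbs_timer(void *arg)
{
	(void)arg;
	for (;;) {
		usleep(10 * 1000);	/* stand-in for the sampling delay */
		pthread_mutex_lock(&dbs_mutex);
		if (!enable) {		/* teardown requested: bail out */
			pthread_mutex_unlock(&dbs_mutex);
			return NULL;
		}
		/* real code would sample load and adjust frequency here */
		pthread_mutex_unlock(&dbs_mutex);
	}
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, do_dbs_timer, NULL);
	usleep(50 * 1000);

	/*
	 * GOV_STOP analogue, in the patched order: stop the worker first,
	 * without holding dbs_mutex while waiting for it to finish ...
	 */
	pthread_mutex_lock(&dbs_mutex);
	enable = false;			/* locked only to avoid a data race */
	pthread_mutex_unlock(&dbs_mutex);
	pthread_join(worker, NULL);	/* ~ cancel_delayed_work_sync() */

	/*
	 * ... then take dbs_mutex for the rest of the teardown. Holding it
	 * across the join above could deadlock: the worker would block on
	 * the mutex while main blocks waiting for the worker to exit.
	 */
	pthread_mutex_lock(&dbs_mutex);
	/* ~ sysfs_remove_group() / dbs_enable-- */
	pthread_mutex_unlock(&dbs_mutex);

	puts("clean teardown, no deadlock");
	return 0;
}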