 drivers/cpufreq/cpufreq.c              | 19 +-
 drivers/cpufreq/cpufreq_conservative.c | 52 +-
 drivers/cpufreq/cpufreq_ondemand.c     | 41 +-
 drivers/cpufreq/cpufreq_userspace.c    | 25 +-
 4 files changed, 70 insertions(+), 67 deletions(-)
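
The change is mechanical across all four files: each lock declared with the old semaphore macro DECLARE_MUTEX() becomes a real mutex declared with DEFINE_MUTEX(), every down()/up() pair becomes mutex_lock()/mutex_unlock(), and <linux/mutex.h> is added to the includes. A minimal sketch of the before/after pattern (the lock names below are illustrative, not the ones used in the patch; the old semaphore API lived in <asm/semaphore.h> in kernels of this era and is not shown):

/*
 * Before: a semaphore initialised to 1, used as a binary mutex via the
 * old semaphore primitives down() and up().
 */
static DECLARE_MUTEX(example_lock);		/* hypothetical name */

static void touch_shared_state_old(void)
{
	down(&example_lock);
	/* ... modify governor state ... */
	up(&example_lock);
}

/*
 * After: a real mutex, which gets owner tracking and the mutex debugging
 * support that plain semaphores lack.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);		/* hypothetical name */

static void touch_shared_state_new(void)
{
	mutex_lock(&example_mutex);
	/* ... modify governor state ... */
	mutex_unlock(&example_mutex);
}
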
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 277a843a87a6..eb2f19d00e93 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/completion.h>
+#include <linux/mutex.h>
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
 
@@ -55,7 +56,7 @@ static DECLARE_RWSEM (cpufreq_notifier_rwsem);
 
 
 static LIST_HEAD(cpufreq_governor_list);
-static DECLARE_MUTEX (cpufreq_governor_sem);
+static DEFINE_MUTEX (cpufreq_governor_mutex);
 
 struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu)
 {
@@ -297,18 +298,18 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
 		return -EINVAL;
 	} else {
 		struct cpufreq_governor *t;
-		down(&cpufreq_governor_sem);
+		mutex_lock(&cpufreq_governor_mutex);
 		if (!cpufreq_driver || !cpufreq_driver->target)
 			goto out;
 		list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 			if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
 				*governor = t;
-				up(&cpufreq_governor_sem);
+				mutex_unlock(&cpufreq_governor_mutex);
 				return 0;
 			}
 		}
 	out:
-		up(&cpufreq_governor_sem);
+		mutex_unlock(&cpufreq_governor_mutex);
 	}
 	return -EINVAL;
 }
@@ -1217,17 +1218,17 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 	if (!governor)
 		return -EINVAL;
 
-	down(&cpufreq_governor_sem);
+	mutex_lock(&cpufreq_governor_mutex);
 
 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 		if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
-			up(&cpufreq_governor_sem);
+			mutex_unlock(&cpufreq_governor_mutex);
 			return -EBUSY;
 		}
 	}
 	list_add(&governor->governor_list, &cpufreq_governor_list);
 
-	up(&cpufreq_governor_sem);
+	mutex_unlock(&cpufreq_governor_mutex);
 
 	return 0;
 }
@@ -1239,9 +1240,9 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (!governor)
 		return;
 
-	down(&cpufreq_governor_sem);
+	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
-	up(&cpufreq_governor_sem);
+	mutex_unlock(&cpufreq_governor_mutex);
 	return;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 39543a2bed0f..ac38766b2583 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -28,7 +28,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
-
+#include <linux/mutex.h>
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
@@ -71,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DECLARE_MUTEX (dbs_sem);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -139,9 +139,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (ret != 1 )
 		return -EINVAL;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -153,14 +153,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.sampling_rate = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -172,16 +172,16 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD ||
 			input <= dbs_tuners_ins.down_threshold) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.up_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -193,16 +193,16 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
 			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
 			input >= dbs_tuners_ins.up_threshold) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.down_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -222,9 +222,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if ( input > 1 )
 		input = 1;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -236,7 +236,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
 	}
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -257,9 +257,9 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.freq_step = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -444,12 +444,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
@@ -487,7 +487,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -521,11 +521,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_timer_init();
 		}
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -536,12 +536,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 0)
 			dbs_timer_exit();
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 
 		break;
 
	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -550,7 +550,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e69fd8dd1f1c..9ee9411f186f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -27,6 +27,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
+#include <linux/mutex.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -70,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DECLARE_MUTEX (dbs_sem);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK	(dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -136,9 +137,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -150,14 +151,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.sampling_rate = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -169,15 +170,15 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.up_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -197,9 +198,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if ( input > 1 )
 		input = 1;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -211,7 +212,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
 	}
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -356,12 +357,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
@@ -399,7 +400,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -435,11 +436,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_timer_init();
 		}
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -450,12 +451,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 0)
 			dbs_timer_exit();
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -464,7 +465,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index d32bf3593cd3..4d6fa63da598 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 
@@ -35,7 +36,7 @@ static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
 static unsigned int	cpu_is_managed[NR_CPUS];
 static struct cpufreq_policy current_policy[NR_CPUS];
 
-static DECLARE_MUTEX (userspace_sem);
+static DEFINE_MUTEX (userspace_mutex);
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
@@ -70,7 +71,7 @@ static int cpufreq_set(unsigned int freq, unsigned int cpu)
 
 	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", cpu, freq);
 
-	down(&userspace_sem);
+	mutex_lock(&userspace_mutex);
 	if (!cpu_is_managed[cpu])
 		goto err;
 
@@ -83,16 +84,16 @@ static int cpufreq_set(unsigned int freq, unsigned int cpu)
 
 	/*
 	 * We're safe from concurrent calls to ->target() here
-	 * as we hold the userspace_sem lock. If we were calling
+	 * as we hold the userspace_mutex lock. If we were calling
 	 * cpufreq_driver_target, a deadlock situation might occur:
-	 * A: cpufreq_set (lock userspace_sem) -> cpufreq_driver_target(lock policy->lock)
-	 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_sem)
+	 * A: cpufreq_set (lock userspace_mutex) -> cpufreq_driver_target(lock policy->lock)
+	 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_mutex)
 	 */
 	ret = __cpufreq_driver_target(&current_policy[cpu], freq,
 			CPUFREQ_RELATION_L);
 
 err:
-	up(&userspace_sem);
+	mutex_unlock(&userspace_mutex);
 	return ret;
 }
 
@@ -134,7 +135,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 		if (!cpu_online(cpu))
 			return -EINVAL;
 		BUG_ON(!policy->cur);
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
 		cpu_is_managed[cpu] = 1;
 		cpu_min_freq[cpu] = policy->min;
 		cpu_max_freq[cpu] = policy->max;
@@ -143,20 +144,20 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 		sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
 		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
 		dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_STOP:
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
 		cpu_is_managed[cpu] = 0;
 		cpu_min_freq[cpu] = 0;
 		cpu_max_freq[cpu] = 0;
 		cpu_set_freq[cpu] = 0;
 		sysfs_remove_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
 		dprintk("managing cpu %u stopped\n", cpu);
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_LIMITS:
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
 		cpu_min_freq[cpu] = policy->min;
 		cpu_max_freq[cpu] = policy->max;
 		dprintk("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu], cpu_set_freq[cpu]);
@@ -171,7 +172,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 					CPUFREQ_RELATION_L);
 		}
 		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
 		break;
 	}
 	return 0;
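
The pre-existing comment in cpufreq_set() survives this patch with only the lock renamed: the userspace governor deliberately calls __cpufreq_driver_target() while holding userspace_mutex, because the cpufreq_driver_target() wrapper would also take policy->lock and complete an AB-BA cycle with cpufreq_set_policy(). A toy sketch of that ordering hazard, with hypothetical lock names standing in for userspace_mutex and policy->lock:

#include <linux/mutex.h>

static DEFINE_MUTEX(governor_lock);	/* stands in for userspace_mutex */
static DEFINE_MUTEX(policy_lock);	/* stands in for policy->lock    */

/* Path A: what cpufreq_set() would do if it used the locking wrapper. */
static void path_a(void)
{
	mutex_lock(&governor_lock);
	mutex_lock(&policy_lock);	/* holds governor_lock, wants policy_lock */
	mutex_unlock(&policy_lock);
	mutex_unlock(&governor_lock);
}

/* Path B: cpufreq_set_policy() -> __cpufreq_governor() -> governor callback. */
static void path_b(void)
{
	mutex_lock(&policy_lock);
	mutex_lock(&governor_lock);	/* holds policy_lock, wants governor_lock */
	mutex_unlock(&governor_lock);
	mutex_unlock(&policy_lock);
}

/*
 * If path_a() and path_b() run concurrently, each can block holding the lock
 * the other needs. Calling __cpufreq_driver_target() (which does not take
 * policy->lock) from under userspace_mutex removes path A's second
 * acquisition and with it the cycle.
 */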