about summary refs log tree commit diff stats
path: root/drivers/cpufreq/cpufreq_ondemand.c
diff options
context:
space:
mode:
authorThomas Renninger <trenn@suse.de>2011-03-03 15:31:27 -0500
committerDave Jones <davej@redhat.com>2011-03-16 17:54:32 -0400
commit326c86deaed54ad1b364fcafe5073f563671eb58 (patch)
treefb0784c6450d3d618127df3823ffeeda400ac58f /drivers/cpufreq/cpufreq_ondemand.c
parente8951251b89440644a39f2512b4f265973926b41 (diff)
[CPUFREQ] Remove unneeded locks
There cannot be any concurrent access to these through different cpu sysfs files anymore, because these tunables are now all global (not per cpu). I still have some doubts whether some of these locks were needed at all. Anyway, let's get rid of them.

Signed-off-by: Thomas Renninger <trenn@suse.de>
Signed-off-by: Dave Jones <davej@redhat.com>
CC: cpufreq@vger.kernel.org
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  25
1 file changed, 1 insertion(+), 24 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index ba18205be12b..891360edecdd 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -265,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
-
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -282,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
-
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.io_is_busy = !!input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -301,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 	    input < MIN_FREQUENCY_UP_THRESHOLD) {
 		return -EINVAL;
 	}
-
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.up_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -318,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
 
 	/* Reset down sampling multiplier in case it was active */
@@ -327,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->rate_mult = 1;
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -347,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	if (input > 1)
 		input = 1;
 
-	mutex_lock(&dbs_mutex);
 	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -364,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -382,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
 	if (input > 1000)
 		input = 1000;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.powersave_bias = input;
 	ondemand_powersave_bias_init();
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 