author		Thomas Renninger <trenn@suse.de>	2011-03-03 15:31:27 -0500
committer	Dave Jones <davej@redhat.com>	2011-03-16 17:54:32 -0400
commit		326c86deaed54ad1b364fcafe5073f563671eb58 (patch)
tree		fb0784c6450d3d618127df3823ffeeda400ac58f /drivers/cpufreq
parent		e8951251b89440644a39f2512b4f265973926b41 (diff)
[CPUFREQ] Remove unneeded locks
There can no longer be any concurrent access to these tunables through
different CPUs' sysfs files, because the tunables are now all global
(not per-CPU).
I still have some doubts whether some of these locks
were needed at all. Anyway, let's get rid of them.
Signed-off-by: Thomas Renninger <trenn@suse.de>
Signed-off-by: Dave Jones <davej@redhat.com>
CC: cpufreq@vger.kernel.org
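
For illustration, the resulting pattern in a governor tunable's store handler looks roughly like the sketch below. This is a simplified, hypothetical example rather than a verbatim excerpt from the patch: with a single global dbs_tuners_ins shared by every CPU's sysfs entry, the handler only validates the input and assigns it, with no dbs_mutex held around the plain write.

/*
 * Hypothetical sketch of a post-patch store handler. Identifiers mirror
 * the governor code, but this is not literal kernel source.
 */
static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
			       const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 100)
		input = 100;

	/*
	 * One global tunables struct, no per-CPU copies to keep in sync,
	 * so no mutex_lock(&dbs_mutex) around the assignment.
	 */
	dbs_tuners_ins.freq_step = input;
	return count;
}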
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	34
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	25
2 files changed, 6 insertions(+), 53 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 3182d85b3374..33b56e5c5c14 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -195,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -212,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -226,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > 100 ||
-			input <= dbs_tuners_ins.down_threshold) {
-		mutex_unlock(&dbs_mutex);
+			input <= dbs_tuners_ins.down_threshold)
 		return -EINVAL;
-	}
 
 	dbs_tuners_ins.up_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -246,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= dbs_tuners_ins.up_threshold) {
-		mutex_unlock(&dbs_mutex);
+			input >= dbs_tuners_ins.up_threshold)
 		return -EINVAL;
-	}
 
 	dbs_tuners_ins.down_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -275,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	if (input > 1)
 		input = 1;
 
-	mutex_lock(&dbs_mutex);
-	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-		mutex_unlock(&dbs_mutex);
+	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
 		return count;
-	}
+
 	dbs_tuners_ins.ignore_nice = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
@@ -291,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		if (dbs_tuners_ins.ignore_nice)
 			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -311,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.freq_step = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index ba18205be12b..891360edecdd 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -265,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
-
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -282,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
-
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.io_is_busy = !!input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -301,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		return -EINVAL;
 	}
-
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.up_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -318,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
 
 	/* Reset down sampling multiplier in case it was active */
@@ -327,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->rate_mult = 1;
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -347,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	if (input > 1)
 		input = 1;
 
-	mutex_lock(&dbs_mutex);
 	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -364,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -382,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
 	if (input > 1000)
 		input = 1000;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.powersave_bias = input;
 	ondemand_powersave_bias_init();
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 