author     hongbo.zhang <hongbo.zhang@linaro.com>    2012-10-30 12:48:59 -0400
committer  Zhang Rui <rui.zhang@intel.com>           2012-11-07 01:52:27 -0500
commit     160b7d8048b87cb594e1a22b5345b468b6c2c40e
tree       df54e9c1d6f594d3d7de7086736a54a29be5fc52 /drivers/thermal
parent     9c51b05a7852183ba9654ca850bee97d38e948d5
Thermal: Remove the cooling_cpufreq_list.
The problem with using this list is that the cpufreq_get_max_state callback is invoked while the cooling device is still being registered by thermal_cooling_device_register(), before its list entry has been added, so the lookup fails. Moreover, there is no need to maintain such a list at all: the cpufreq_cooling_device instance can be obtained directly from the private thermal_cooling_device.devdata pointer.

Signed-off-by: hongbo.zhang <hongbo.zhang@linaro.com>
Reviewed-by: Francesco Lavra <francescolavra.fl@gmail.com>
Reviewed-by: Amit Daniel Kachhap <amit.kachhap@linaro.org>
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
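As background on the devdata mechanism the patch switches to, here is a minimal sketch (illustration only, not part of this patch; my_cooling_data, my_get_max_state, my_get_cur_state, my_set_cur_state and my_cooling_register are hypothetical names) of how the private pointer handed to thermal_cooling_device_register() is returned to every callback as cdev->devdata, which is what makes a global lookup list unnecessary.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/thermal.h>

/* Hypothetical per-device private data. */
struct my_cooling_data {
	unsigned long cur_state;
};

static int my_get_max_state(struct thermal_cooling_device *cdev,
			    unsigned long *state)
{
	*state = 1;	/* a single throttling step in this toy example */
	return 0;
}

static int my_get_cur_state(struct thermal_cooling_device *cdev,
			    unsigned long *state)
{
	/* The pointer handed over at registration time comes back here as
	 * cdev->devdata, so no global list has to be walked to find it. */
	struct my_cooling_data *data = cdev->devdata;

	*state = data->cur_state;
	return 0;
}

static int my_set_cur_state(struct thermal_cooling_device *cdev,
			    unsigned long state)
{
	struct my_cooling_data *data = cdev->devdata;

	data->cur_state = state;
	return 0;
}

static const struct thermal_cooling_device_ops my_cooling_ops = {
	.get_max_state = my_get_max_state,
	.get_cur_state = my_get_cur_state,
	.set_cur_state = my_set_cur_state,
};

static struct thermal_cooling_device *my_cooling_register(void)
{
	struct my_cooling_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return ERR_PTR(-ENOMEM);

	/* The second argument is stored by the thermal core and handed back
	 * to every callback above through cdev->devdata. */
	return thermal_cooling_device_register("my-cooling", data,
					       &my_cooling_ops);
}

With this pattern each cooling device carries its own private state, so callbacks such as cpufreq_get_max_state() can run even while thermal_cooling_device_register() is still in progress.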
Diffstat (limited to 'drivers/thermal')
-rw-r--r--  drivers/thermal/cpu_cooling.c | 91
1 file changed, 19 insertions(+), 72 deletions(-)
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index bfd62b7f32a7..392d57d8590d 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -58,8 +58,9 @@ struct cpufreq_cooling_device {
 };
 static LIST_HEAD(cooling_cpufreq_list);
 static DEFINE_IDR(cpufreq_idr);
+static DEFINE_MUTEX(cooling_cpufreq_lock);
 
-static struct mutex cooling_cpufreq_lock;
+static unsigned int cpufreq_dev_count;
 
 /* notify_table passes value to the CPUFREQ_ADJUST callback function. */
 #define NOTIFY_INVALID NULL
@@ -240,28 +241,18 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
 static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
 {
-	int ret = -EINVAL, i = 0;
-	struct cpufreq_cooling_device *cpufreq_device;
-	struct cpumask *maskPtr;
+	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+	struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
 	unsigned int cpu;
 	struct cpufreq_frequency_table *table;
 	unsigned long count = 0;
+	int i = 0;
 
-	mutex_lock(&cooling_cpufreq_lock);
-	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
-		if (cpufreq_device && cpufreq_device->cool_dev == cdev)
-			break;
-	}
-	if (cpufreq_device == NULL)
-		goto return_get_max_state;
-
-	maskPtr = &cpufreq_device->allowed_cpus;
 	cpu = cpumask_any(maskPtr);
 	table = cpufreq_frequency_get_table(cpu);
 	if (!table) {
 		*state = 0;
-		ret = 0;
-		goto return_get_max_state;
+		return 0;
 	}
 
 	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
@@ -272,12 +263,10 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
 
 	if (count > 0) {
 		*state = --count;
-		ret = 0;
+		return 0;
 	}
 
-return_get_max_state:
-	mutex_unlock(&cooling_cpufreq_lock);
-	return ret;
+	return -EINVAL;
 }
 
 /**
@@ -288,20 +277,10 @@ return_get_max_state:
 static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
 				 unsigned long *state)
 {
-	int ret = -EINVAL;
-	struct cpufreq_cooling_device *cpufreq_device;
+	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
 
-	mutex_lock(&cooling_cpufreq_lock);
-	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
-		if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
-			*state = cpufreq_device->cpufreq_state;
-			ret = 0;
-			break;
-		}
-	}
-	mutex_unlock(&cooling_cpufreq_lock);
-
-	return ret;
+	*state = cpufreq_device->cpufreq_state;
+	return 0;
 }
 
 /**
@@ -312,22 +291,9 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
 static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 				 unsigned long state)
 {
-	int ret = -EINVAL;
-	struct cpufreq_cooling_device *cpufreq_device;
+	struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
 
-	mutex_lock(&cooling_cpufreq_lock);
-	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
-		if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
-			ret = 0;
-			break;
-		}
-	}
-	if (!ret)
-		ret = cpufreq_apply_cooling(cpufreq_device, state);
-
-	mutex_unlock(&cooling_cpufreq_lock);
-
-	return ret;
+	return cpufreq_apply_cooling(cpufreq_device, state);
 }
 
 /* Bind cpufreq callbacks to thermal cooling device ops */
@@ -351,14 +317,11 @@ struct thermal_cooling_device *cpufreq_cooling_register(
 {
 	struct thermal_cooling_device *cool_dev;
 	struct cpufreq_cooling_device *cpufreq_dev = NULL;
-	unsigned int cpufreq_dev_count = 0, min = 0, max = 0;
+	unsigned int min = 0, max = 0;
 	char dev_name[THERMAL_NAME_LENGTH];
 	int ret = 0, i;
 	struct cpufreq_policy policy;
 
-	list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node)
-		cpufreq_dev_count++;
-
 	/*Verify that all the clip cpus have same freq_min, freq_max limit*/
 	for_each_cpu(i, clip_cpus) {
 		/*continue if cpufreq policy not found and not return error*/
@@ -380,9 +343,6 @@ struct thermal_cooling_device *cpufreq_cooling_register(
 
 	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
 
-	if (cpufreq_dev_count == 0)
-		mutex_init(&cooling_cpufreq_lock);
-
 	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
 	if (ret) {
 		kfree(cpufreq_dev);
@@ -401,12 +361,12 @@ struct thermal_cooling_device *cpufreq_cooling_register(
 	cpufreq_dev->cool_dev = cool_dev;
 	cpufreq_dev->cpufreq_state = 0;
 	mutex_lock(&cooling_cpufreq_lock);
-	list_add_tail(&cpufreq_dev->node, &cooling_cpufreq_list);
 
 	/* Register the notifier for first cpufreq cooling device */
 	if (cpufreq_dev_count == 0)
 		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
 					  CPUFREQ_POLICY_NOTIFIER);
+	cpufreq_dev_count++;
 
 	mutex_unlock(&cooling_cpufreq_lock);
 	return cool_dev;
@@ -419,33 +379,20 @@ EXPORT_SYMBOL(cpufreq_cooling_register);
  */
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
 {
-	struct cpufreq_cooling_device *cpufreq_dev = NULL;
-	unsigned int cpufreq_dev_count = 0;
+	struct cpufreq_cooling_device *cpufreq_dev = cdev->devdata;
 
 	mutex_lock(&cooling_cpufreq_lock);
-	list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node) {
-		if (cpufreq_dev && cpufreq_dev->cool_dev == cdev)
-			break;
-		cpufreq_dev_count++;
-	}
-
-	if (!cpufreq_dev || cpufreq_dev->cool_dev != cdev) {
-		mutex_unlock(&cooling_cpufreq_lock);
-		return;
-	}
-
-	list_del(&cpufreq_dev->node);
+	cpufreq_dev_count--;
 
 	/* Unregister the notifier for the last cpufreq cooling device */
-	if (cpufreq_dev_count == 1) {
+	if (cpufreq_dev_count == 0) {
 		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
 					    CPUFREQ_POLICY_NOTIFIER);
 	}
 	mutex_unlock(&cooling_cpufreq_lock);
+
 	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
 	release_idr(&cpufreq_idr, cpufreq_dev->id);
-	if (cpufreq_dev_count == 1)
-		mutex_destroy(&cooling_cpufreq_lock);
 	kfree(cpufreq_dev);
 }
 EXPORT_SYMBOL(cpufreq_cooling_unregister);