 drivers/cpufreq/cpufreq_conservative.c | 12 ++++++++++++
 drivers/cpufreq/cpufreq_ondemand.c     | 12 ++++++++++++
 2 files changed, 24 insertions(+), 0 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e07a35487bde..8878a154ed43 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -72,6 +72,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
+ * lock and dbs_mutex. The cpu_hotplug lock must always be held before
+ * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is called with dbs_mutex held, the
+ * cpu_hotplug lock must be taken first. Note that the cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX(dbs_mutex);
 static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);
 
@@ -414,12 +422,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -514,6 +524,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -524,6 +535,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3e6ffcaa5af4..4d308410b60e 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -71,6 +71,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is an ordering requirement between the cpu_hotplug
+ * lock and dbs_mutex. The cpu_hotplug lock must always be held before
+ * dbs_mutex. If any function that can potentially take the cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is called with dbs_mutex held, the
+ * cpu_hotplug lock must be taken first. Note that the cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX(dbs_mutex);
 static DECLARE_WORK(dbs_work, do_dbs_timer, NULL);
 
@@ -363,12 +371,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	queue_delayed_work(dbs_workq, &dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -469,6 +479,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -479,6 +490,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;