aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorVenkatesh Pallipadi <venkatesh.pallipadi@intel.com>2006-06-21 18:18:34 -0400
committerDave Jones <davej@redhat.com>2006-06-21 18:30:26 -0400
commit4ec223d02f4d5f5a3129edc0e3d22550d6ac8a32 (patch)
tree753cec643fa59ccda64a95fa5436956e481c1137 /drivers
parent9ed059e1551bf36092215b965838502ac21f42e4 (diff)
[CPUFREQ] Fix ondemand vs suspend deadlock
Root-caused the bug to a deadlock in cpufreq and ondemand, due to non-existent ordering between the cpu_hotplug lock and dbs_mutex. Basically a race condition between cpu_down() and do_dbs_timer(). cpu_down() flow: * cpu_down() call for CPU 1 * Takes hot plug lock * Calls pre down notifier * cpufreq notifier handler calls cpufreq_driver_target() which takes cpu_hotplug lock again. OK as cpu_hotplug lock is recursive in same process context * CPU 1 goes down * Calls post down notifier * cpufreq notifier handler calls ondemand event stop which takes dbs_mutex So, cpu_hotplug lock is taken before dbs_mutex in this flow. do_dbs_timer is triggered by a periodic timer event. It first takes dbs_mutex and then takes cpu_hotplug lock in cpufreq_driver_target(). Note the reverse order here compared to above. So, if this timer event happens at the right moment during cpu_down, the system will deadlock. Attached patch fixes the issue for both ondemand and conservative. Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c12
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c12
2 files changed, 24 insertions, 0 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e07a35487bde..8878a154ed43 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -72,6 +72,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
72 72
73static unsigned int dbs_enable; /* number of CPUs using this policy */ 73static unsigned int dbs_enable; /* number of CPUs using this policy */
74 74
75/*
76 * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
77 * lock and dbs_mutex. cpu_hotplug lock should always be held before
78 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
79 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
80 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
81 * is recursive for the same process. -Venki
82 */
75static DEFINE_MUTEX (dbs_mutex); 83static DEFINE_MUTEX (dbs_mutex);
76static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); 84static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
77 85
@@ -414,12 +422,14 @@ static void dbs_check_cpu(int cpu)
414static void do_dbs_timer(void *data) 422static void do_dbs_timer(void *data)
415{ 423{
416 int i; 424 int i;
425 lock_cpu_hotplug();
417 mutex_lock(&dbs_mutex); 426 mutex_lock(&dbs_mutex);
418 for_each_online_cpu(i) 427 for_each_online_cpu(i)
419 dbs_check_cpu(i); 428 dbs_check_cpu(i);
420 schedule_delayed_work(&dbs_work, 429 schedule_delayed_work(&dbs_work,
421 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 430 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
422 mutex_unlock(&dbs_mutex); 431 mutex_unlock(&dbs_mutex);
432 unlock_cpu_hotplug();
423} 433}
424 434
425static inline void dbs_timer_init(void) 435static inline void dbs_timer_init(void)
@@ -514,6 +524,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
514 break; 524 break;
515 525
516 case CPUFREQ_GOV_LIMITS: 526 case CPUFREQ_GOV_LIMITS:
527 lock_cpu_hotplug();
517 mutex_lock(&dbs_mutex); 528 mutex_lock(&dbs_mutex);
518 if (policy->max < this_dbs_info->cur_policy->cur) 529 if (policy->max < this_dbs_info->cur_policy->cur)
519 __cpufreq_driver_target( 530 __cpufreq_driver_target(
@@ -524,6 +535,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
524 this_dbs_info->cur_policy, 535 this_dbs_info->cur_policy,
525 policy->min, CPUFREQ_RELATION_L); 536 policy->min, CPUFREQ_RELATION_L);
526 mutex_unlock(&dbs_mutex); 537 mutex_unlock(&dbs_mutex);
538 unlock_cpu_hotplug();
527 break; 539 break;
528 } 540 }
529 return 0; 541 return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3e6ffcaa5af4..4d308410b60e 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -71,6 +71,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
71 71
72static unsigned int dbs_enable; /* number of CPUs using this policy */ 72static unsigned int dbs_enable; /* number of CPUs using this policy */
73 73
74/*
75 * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
76 * lock and dbs_mutex. cpu_hotplug lock should always be held before
77 * dbs_mutex. If any function that can potentially take cpu_hotplug lock
78 * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
79 * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
80 * is recursive for the same process. -Venki
81 */
74static DEFINE_MUTEX (dbs_mutex); 82static DEFINE_MUTEX (dbs_mutex);
75static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); 83static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
76 84
@@ -363,12 +371,14 @@ static void dbs_check_cpu(int cpu)
363static void do_dbs_timer(void *data) 371static void do_dbs_timer(void *data)
364{ 372{
365 int i; 373 int i;
374 lock_cpu_hotplug();
366 mutex_lock(&dbs_mutex); 375 mutex_lock(&dbs_mutex);
367 for_each_online_cpu(i) 376 for_each_online_cpu(i)
368 dbs_check_cpu(i); 377 dbs_check_cpu(i);
369 queue_delayed_work(dbs_workq, &dbs_work, 378 queue_delayed_work(dbs_workq, &dbs_work,
370 usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); 379 usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
371 mutex_unlock(&dbs_mutex); 380 mutex_unlock(&dbs_mutex);
381 unlock_cpu_hotplug();
372} 382}
373 383
374static inline void dbs_timer_init(void) 384static inline void dbs_timer_init(void)
@@ -469,6 +479,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
469 break; 479 break;
470 480
471 case CPUFREQ_GOV_LIMITS: 481 case CPUFREQ_GOV_LIMITS:
482 lock_cpu_hotplug();
472 mutex_lock(&dbs_mutex); 483 mutex_lock(&dbs_mutex);
473 if (policy->max < this_dbs_info->cur_policy->cur) 484 if (policy->max < this_dbs_info->cur_policy->cur)
474 __cpufreq_driver_target( 485 __cpufreq_driver_target(
@@ -479,6 +490,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
479 this_dbs_info->cur_policy, 490 this_dbs_info->cur_policy,
480 policy->min, CPUFREQ_RELATION_L); 491 policy->min, CPUFREQ_RELATION_L);
481 mutex_unlock(&dbs_mutex); 492 mutex_unlock(&dbs_mutex);
493 unlock_cpu_hotplug();
482 break; 494 break;
483 } 495 }
484 return 0; 496 return 0;