author		Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>	2006-06-28 16:51:19 -0400
committer	Dave Jones <davej@redhat.com>	2006-06-30 01:33:31 -0400
commit		2f8a835c705794f71726eb12c06fb0f24fe07ed3 (patch)
tree		c16b1a4bf85df6822b9bcedc608f1db9ddf6e571 /drivers
parent		7a6bc1cdd506cf81f856f0fef4e56a2ba0c5a26d (diff)
[CPUFREQ] Make ondemand sampling per CPU and remove the mutex usage in sampling path.
Make ondemand sampling per CPU and remove the mutex usage in sampling path.

Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	72
1 file changed, 32 insertions(+), 40 deletions(-)
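The heart of this patch is a structural change: instead of one global work item that takes dbs_mutex and lock_cpu_hotplug() and then walks every online CPU, each CPU carries its own work_struct inside its per-CPU cpu_dbs_info_s, and that work re-queues itself on its own CPU with queue_delayed_work_on(). Below is a minimal sketch of the pattern, assuming the 2006-era workqueue API used in the diff (three-argument INIT_WORK(), void * work handlers, cancel_rearming_delayed_workqueue()); the identifiers sample_info, sample_wq, do_sample, sample_start, sample_stop, and SAMPLE_PERIOD_US are hypothetical, for illustration only.

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#define SAMPLE_PERIOD_US 50000	/* hypothetical sampling period */

struct sample_info {
	struct work_struct work;	/* one rearming work item per CPU */
};
static DEFINE_PER_CPU(struct sample_info, sample_info);
static struct workqueue_struct *sample_wq;	/* created once, shared */

static void do_sample(void *data)
{
	/*
	 * queue_delayed_work_on() pinned this work to one CPU, so
	 * smp_processor_id() names our own per-CPU state and no
	 * global lock is needed in the sampling path.
	 */
	unsigned int cpu = smp_processor_id();
	struct sample_info *info = &per_cpu(sample_info, cpu);

	/* ... sample this CPU's load and act on it here ... */

	/* Rearm on the same CPU. */
	queue_delayed_work_on(cpu, sample_wq, &info->work,
			usecs_to_jiffies(SAMPLE_PERIOD_US));
}

static void sample_start(unsigned int cpu)
{
	struct sample_info *info = &per_cpu(sample_info, cpu);

	INIT_WORK(&info->work, do_sample, NULL);
	queue_delayed_work_on(cpu, sample_wq, &info->work,
			usecs_to_jiffies(SAMPLE_PERIOD_US));
}

static void sample_stop(unsigned int cpu)
{
	struct sample_info *info = &per_cpu(sample_info, cpu);

	/* Cancels the work even while it is busy rearming itself. */
	cancel_rearming_delayed_workqueue(sample_wq, &info->work);
}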
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index a2add11e56f1..18b016ea5f48 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -64,6 +64,7 @@ struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
+	struct work_struct work;
 	unsigned int enable;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -81,7 +82,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
-static struct workqueue_struct *dbs_workq;
+static struct workqueue_struct *kondemand_wq;
 
 struct dbs_tuners {
 	unsigned int sampling_rate;
@@ -233,17 +234,15 @@ static struct attribute_group dbs_attr_group = {
 
 /************************** sysfs end ************************/
 
-static void dbs_check_cpu(int cpu)
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 	unsigned int idle_ticks, total_ticks;
 	unsigned int load;
-	struct cpu_dbs_info_s *this_dbs_info;
 	cputime64_t cur_jiffies;
 
 	struct cpufreq_policy *policy;
 	unsigned int j;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	if (!this_dbs_info->enable)
 		return;
 
@@ -314,35 +313,29 @@ static void dbs_check_cpu(int cpu)
 
 static void do_dbs_timer(void *data)
 {
-	int i;
-	lock_cpu_hotplug();
-	mutex_lock(&dbs_mutex);
-	for_each_online_cpu(i)
-		dbs_check_cpu(i);
-	queue_delayed_work(dbs_workq, &dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	mutex_unlock(&dbs_mutex);
-	unlock_cpu_hotplug();
+	unsigned int cpu = smp_processor_id();
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	dbs_check_cpu(dbs_info);
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 }
 
-static inline void dbs_timer_init(void)
+static inline void dbs_timer_init(unsigned int cpu)
 {
-	INIT_WORK(&dbs_work, do_dbs_timer, NULL);
-	if (!dbs_workq)
-		dbs_workq = create_singlethread_workqueue("ondemand");
-	if (!dbs_workq) {
-		printk(KERN_ERR "ondemand: Cannot initialize kernel thread\n");
-		return;
-	}
-	queue_delayed_work(dbs_workq, &dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	INIT_WORK(&dbs_info->work, do_dbs_timer, 0);
+	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
+			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	return;
 }
 
-static inline void dbs_timer_exit(void)
+static inline void dbs_timer_exit(unsigned int cpu)
 {
-	if (dbs_workq)
-		cancel_rearming_delayed_workqueue(dbs_workq, &dbs_work);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+	cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -370,6 +363,16 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			break;
 
 		mutex_lock(&dbs_mutex);
+		dbs_enable++;
+		if (dbs_enable == 1) {
+			kondemand_wq = create_workqueue("kondemand");
+			if (!kondemand_wq) {
+				printk(KERN_ERR "Creation of kondemand failed\n");
+				dbs_enable--;
+				mutex_unlock(&dbs_mutex);
+				return -ENOSPC;
+			}
+		}
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -380,7 +383,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 		this_dbs_info->enable = 1;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
-		dbs_enable++;
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
@@ -399,23 +401,20 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				def_sampling_rate = MIN_STAT_SAMPLING_RATE;
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
-			dbs_timer_init();
 		}
+		dbs_timer_init(policy->cpu);
 
 		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
+		dbs_timer_exit(policy->cpu);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
-		/*
-		 * Stop the timerschedule work, when this governor
-		 * is used for first time
-		 */
 		if (dbs_enable == 0)
-			dbs_timer_exit();
+			destroy_workqueue(kondemand_wq);
 
 		mutex_unlock(&dbs_mutex);
 
@@ -452,13 +451,6 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running.
-	   Assumes the timer has been cancelled first. */
-	if (dbs_workq) {
-		flush_workqueue(dbs_workq);
-		destroy_workqueue(dbs_workq);
-	}
-
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
 }
 
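Two ordering points in the governor paths above are easy to miss. First, dbs_enable++ moves ahead of the per-CPU setup so that the first caller into CPUFREQ_GOV_START creates the shared kondemand workqueue exactly once, backing the count out again (and returning -ENOSPC) if create_workqueue() fails, all while still holding dbs_mutex. Second, CPUFREQ_GOV_STOP cancels the CPU's rearming work via dbs_timer_exit() before clearing the enable flag, and the last user destroys the workqueue; that is why the old flush/destroy fallback in cpufreq_gov_dbs_exit() can simply go away.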