about summary refs log tree commit diff stats
path: root/drivers/cpufreq/cpufreq_ondemand.c
diff options
context:
space:
mode:
authorVenkatesh Pallipadi <venkatesh.pallipadi@intel.com>2007-02-05 19:12:44 -0500
committerDave Jones <davej@redhat.com>2007-02-10 20:01:47 -0500
commit529af7a14f04f92213bac371931a2b2b060c63fa (patch)
treef5abea57e39f694ab3dd3f65d0303f10f38c73c3 /drivers/cpufreq/cpufreq_ondemand.c
parent5a01f2e8f3ac134e24144d74bb48a60236f7024d (diff)
[CPUFREQ] ondemand governor restructure the work callback
Restructure the delayed_work callback in ondemand. This eliminates the need for smp_processor_id in the callback function and also helps in proper locking and avoiding flush_workqueue when stopping the governor (done in subsequent patch).

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c28
1 file changed, 16 insertions, 12 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d52f9b426521..a480834c9627 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate;
52static void do_dbs_timer(struct work_struct *work); 52static void do_dbs_timer(struct work_struct *work);
53 53
54/* Sampling types */ 54/* Sampling types */
55enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; 55enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
56 56
57struct cpu_dbs_info_s { 57struct cpu_dbs_info_s {
58 cputime64_t prev_cpu_idle; 58 cputime64_t prev_cpu_idle;
59 cputime64_t prev_cpu_wall; 59 cputime64_t prev_cpu_wall;
60 struct cpufreq_policy *cur_policy; 60 struct cpufreq_policy *cur_policy;
61 struct delayed_work work; 61 struct delayed_work work;
62 enum dbs_sample sample_type;
63 unsigned int enable;
64 struct cpufreq_frequency_table *freq_table; 62 struct cpufreq_frequency_table *freq_table;
65 unsigned int freq_lo; 63 unsigned int freq_lo;
66 unsigned int freq_lo_jiffies; 64 unsigned int freq_lo_jiffies;
67 unsigned int freq_hi_jiffies; 65 unsigned int freq_hi_jiffies;
66 int cpu;
67 unsigned int enable:1,
68 sample_type:1;
68}; 69};
69static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info); 70static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
70 71
@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
402 if (load < (dbs_tuners_ins.up_threshold - 10)) { 403 if (load < (dbs_tuners_ins.up_threshold - 10)) {
403 unsigned int freq_next, freq_cur; 404 unsigned int freq_next, freq_cur;
404 405
405 freq_cur = cpufreq_driver_getavg(policy); 406 freq_cur = __cpufreq_driver_getavg(policy);
406 if (!freq_cur) 407 if (!freq_cur)
407 freq_cur = policy->cur; 408 freq_cur = policy->cur;
408 409
@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
423 424
424static void do_dbs_timer(struct work_struct *work) 425static void do_dbs_timer(struct work_struct *work)
425{ 426{
426 unsigned int cpu = smp_processor_id(); 427 struct cpu_dbs_info_s *dbs_info =
427 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); 428 container_of(work, struct cpu_dbs_info_s, work.work);
428 enum dbs_sample sample_type = dbs_info->sample_type; 429 unsigned int cpu = dbs_info->cpu;
430 int sample_type = dbs_info->sample_type;
431
429 /* We want all CPUs to do sampling nearly on same jiffy */ 432 /* We want all CPUs to do sampling nearly on same jiffy */
430 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 433 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
431 434
@@ -454,17 +457,17 @@ static void do_dbs_timer(struct work_struct *work)
454 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); 457 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
455} 458}
456 459
457static inline void dbs_timer_init(unsigned int cpu) 460static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
458{ 461{
459 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
460 /* We want all CPUs to do sampling nearly on same jiffy */ 462 /* We want all CPUs to do sampling nearly on same jiffy */
461 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); 463 int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
462 delay -= jiffies % delay; 464 delay -= jiffies % delay;
463 465
464 ondemand_powersave_bias_init(); 466 ondemand_powersave_bias_init();
465 INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
466 dbs_info->sample_type = DBS_NORMAL_SAMPLE; 467 dbs_info->sample_type = DBS_NORMAL_SAMPLE;
467 queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); 468 INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
469 queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
470 delay);
468} 471}
469 472
470static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info) 473static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -528,6 +531,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
528 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j); 531 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
529 j_dbs_info->prev_cpu_wall = get_jiffies_64(); 532 j_dbs_info->prev_cpu_wall = get_jiffies_64();
530 } 533 }
534 this_dbs_info->cpu = cpu;
531 this_dbs_info->enable = 1; 535 this_dbs_info->enable = 1;
532 /* 536 /*
533 * Start the timerschedule work, when this governor 537 * Start the timerschedule work, when this governor
@@ -548,7 +552,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
548 552
549 dbs_tuners_ins.sampling_rate = def_sampling_rate; 553 dbs_tuners_ins.sampling_rate = def_sampling_rate;
550 } 554 }
551 dbs_timer_init(policy->cpu); 555 dbs_timer_init(this_dbs_info);
552 556
553 mutex_unlock(&dbs_mutex); 557 mutex_unlock(&dbs_mutex);
554 break; 558 break;