author		Dave Jones <davej@redhat.com>	2009-01-18 01:43:44 -0500
committer	Dave Jones <davej@redhat.com>	2009-02-24 22:47:30 -0500
commit		2b03f891ad3804dd3fa4dadfd33e5dcb200389c5
tree		66c5efcaabb0cd51dd959fd9f5e51b65274192f8	/drivers/cpufreq/cpufreq_ondemand.c
parent		b9e7638a301b1245d4675087a05fa90fb4fa1845
[CPUFREQ] checkpatch cleanups for ondemand governor.
Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	29
1 file changed, 13 insertions, 16 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6f45b1658a67..1fa4420eb33c 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -65,14 +65,14 @@ struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_wall;
 	cputime64_t prev_cpu_nice;
 	struct cpufreq_policy *cur_policy;
- 	struct delayed_work work;
+	struct delayed_work work;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
 	int cpu;
 	unsigned int enable:1,
-		     sample_type:1;
+		sample_type:1;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -203,12 +203,12 @@ static void ondemand_powersave_bias_init(void)
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+	return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
 }
 
 static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+	return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name)					\
@@ -279,14 +279,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	unsigned int j;
 
 	ret = sscanf(buf, "%u", &input);
-	if ( ret != 1 )
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 1 )
+	if (input > 1)
 		input = 1;
 
 	mutex_lock(&dbs_mutex);
-	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
 		return count;
 	}
@@ -337,7 +337,7 @@ define_one_rw(up_threshold);
 define_one_rw(ignore_nice_load);
 define_one_rw(powersave_bias);
 
-static struct attribute * dbs_attributes[] = {
+static struct attribute *dbs_attributes[] = {
 	&sampling_rate_max.attr,
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
@@ -512,8 +512,7 @@ static void do_dbs_timer(struct work_struct *work)
 		}
 	} else {
 		__cpufreq_driver_target(dbs_info->cur_policy,
-			dbs_info->freq_lo,
-			CPUFREQ_RELATION_H);
+			dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
 	unlock_policy_rwsem_write(cpu);
@@ -530,7 +529,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
 	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
-	                      delay);
+			      delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -617,12 +616,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
-				policy->max,
-				CPUFREQ_RELATION_H);
+				policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
-				policy->min,
-				CPUFREQ_RELATION_L);
+				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}
@@ -677,7 +674,7 @@ static void __exit cpufreq_gov_dbs_exit(void)
 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
 MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
 MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
-                   "Low Latency Frequency Transition capable processors");
+	"Low Latency Frequency Transition capable processors");
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
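
The hunks above address recurring checkpatch.pl style complaints rather than functional behaviour: no space between a function name and its opening parenthesis (sprintf(buf, ...)), no spaces just inside parentheses (if (ret != 1)), the asterisk binding to the name in pointer declarations (struct attribute *dbs_attributes[]), folding short continuation arguments onto the previous line, and a few pure whitespace fixes. The fragment below is a minimal, self-contained userspace sketch written in the post-cleanup style; it is not code from the kernel tree, and the helper name parse_flag is invented purely to mirror the sscanf/clamp pattern seen in store_ignore_nice_load().

/*
 * Hypothetical example, not a kernel function: parse_flag() only mirrors
 * the sscanf/clamp pattern from store_ignore_nice_load() to show the
 * checkpatch-preferred spacing.
 */
#include <stdio.h>

static int parse_flag(const char *buf, unsigned int *out)	/* "*out", not "* out" */
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);	/* no space before the '(' of a call */
	if (ret != 1)				/* no spaces just inside parentheses */
		return -1;

	if (input > 1)
		input = 1;			/* clamp to a boolean-style 0/1 value */

	*out = input;
	return 0;
}

int main(void)
{
	unsigned int flag;

	if (parse_flag("7", &flag) == 0)
		printf("%u\n", flag);		/* prints 1 because 7 was clamped */
	return 0;
}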