author		Dave Jones <davej@redhat.com>	2006-02-28 00:43:23 -0500
committer	Dave Jones <davej@redhat.com>	2006-02-28 00:43:23 -0500
commit		32ee8c3e470d86588b51dc42ed01e85c5fa0f180 (patch)
tree		d544cc24c37c02f44f9cf89cb5647d74a61d7ce6 /drivers/cpufreq/cpufreq_ondemand.c
parent		8ad5496d2359a19127ad9f2eda69485025c9917f (diff)

[CPUFREQ] Lots of whitespace & CodingStyle cleanup.

Signed-off-by: Dave Jones <davej@redhat.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
 -rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 86
 1 file changed, 43 insertions(+), 43 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 9ee9411f186f..69aa1db8336c 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -38,17 +38,17 @@
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)

 /*
  * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
  * latency of the processor. The governor will work on any processor with
  * transition latency <= 10mS, using appropriate sampling
  * rate.
  * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
  * this governor will not work.
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO		(2)
 /* for correct statistics, we need at least 10 ticks between each measure */
 #define MIN_STAT_SAMPLING_RATE		(MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
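(Not part of the patch: a standalone sketch of the sampling-rate rule stated in the comment above — 1000 times the transition latency, floored at MIN_STAT_SAMPLING_RATE so at least 10 ticks pass between measurements. The function and parameter names are illustrative, not from the file.)

/* illustrative only; mirrors the "1000 x transition latency" rule above */
static unsigned int example_sampling_rate(unsigned int transition_latency_us,
                                          unsigned int min_stat_rate_us)
{
        unsigned int rate = transition_latency_us * 1000;

        if (rate < min_stat_rate_us)    /* keep >= 10 ticks between samples */
                rate = min_stat_rate_us;
        return rate;
}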
@@ -62,28 +62,28 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(void *data);

 struct cpu_dbs_info_s {
 	struct cpufreq_policy *cur_policy;
 	unsigned int prev_cpu_idle_up;
 	unsigned int prev_cpu_idle_down;
 	unsigned int enable;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

 static unsigned int dbs_enable;	/* number of CPUs using this policy */

 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);

 struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int ignore_nice;
 };

 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 };

 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -106,8 +106,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
 }

 #define define_one_ro(_name) \
 static struct freq_attr _name = \
 __ATTR(_name, 0444, show_##_name, NULL)

 define_one_ro(sampling_rate_max);
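(Not part of the patch: for reference, define_one_ro(sampling_rate_max) above expands, one macro level deep, to the following read-only sysfs attribute — mode 0444, a show handler, and no store handler.)

static struct freq_attr sampling_rate_max =
__ATTR(sampling_rate_max, 0444, show_sampling_rate_max, NULL);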
@@ -125,7 +125,7 @@ show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);

 static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -144,7 +144,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	return count;
 }

 static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -163,7 +163,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }

 static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -171,7 +171,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	ret = sscanf (buf, "%u", &input);

 	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
@@ -190,14 +190,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	int ret;

 	unsigned int j;

 	ret = sscanf (buf, "%u", &input);
 	if ( ret != 1 )
 		return -EINVAL;

 	if ( input > 1 )
 		input = 1;

 	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
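(Not part of the patch: the store_* handlers in this file all share one parse/validate/update shape, serialized against the sampling timer by dbs_mutex. A condensed sketch — EXAMPLE_MIN, EXAMPLE_MAX, and the tunable field are made up, and validation order is simplified relative to the real handlers.)

static ssize_t store_example(struct cpufreq_policy *unused,
		const char *buf, size_t count)
{
	unsigned int input;
	int ret = sscanf(buf, "%u", &input);		/* parse */

	if (ret != 1 || input < EXAMPLE_MIN || input > EXAMPLE_MAX)
		return -EINVAL;				/* validate */

	mutex_lock(&dbs_mutex);				/* serialize with the timer */
	dbs_tuners_ins.example = input;			/* update the tunable */
	mutex_unlock(&dbs_mutex);
	return count;
}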
@@ -259,16 +259,16 @@ static void dbs_check_cpu(int cpu)
 		return;

 	policy = this_dbs_info->cur_policy;
 	/*
 	 * Every sampling_rate, we check, if current idle time is less
 	 * than 20% (default), then we try to increase frequency
 	 * Every sampling_rate*sampling_down_factor, we look for the lowest
 	 * frequency which can sustain the load while keeping idle time over
 	 * 30%. If such a frequency exists, we try to decrease to this frequency.
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
 	 * 5% (default) of current frequency
 	 */

 	/* Check for frequency increase */
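(Not part of the patch: the up-scaling half of the policy described in the comment above, restated as a sketch. The 20% idle figure corresponds to the default up_threshold of 80; the names are illustrative.)

static unsigned int example_decision(unsigned int idle_pct,
		unsigned int cur, unsigned int max)
{
	if (idle_pct < 20)	/* load above up_threshold (default 80%) */
		return max;	/* any increase jumps straight to the maximum */
	return cur;		/* decreases run on the slower
				   sampling_rate * sampling_down_factor path */
}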
@@ -298,14 +298,14 @@ static void dbs_check_cpu(int cpu)
 		struct cpu_dbs_info_s *j_dbs_info;

 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
 		j_dbs_info->prev_cpu_idle_down =
 				j_dbs_info->prev_cpu_idle_up;
 	}
 	/* if we are already at full speed then break out early */
 	if (policy->cur == policy->max)
 		return;

 	__cpufreq_driver_target(policy, policy->max,
 			CPUFREQ_RELATION_H);
 	return;
 }
@@ -347,7 +347,7 @@ static void dbs_check_cpu(int cpu)
 	 * policy. To be safe, we focus 10 points under the threshold.
 	 */
 	freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
 	freq_next = (freq_next * policy->cur) /
 			(dbs_tuners_ins.up_threshold - 10);

 	if (freq_next <= ((policy->cur * 95) / 100))
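(Not part of the patch: a worked example of the freq_next formula above, with made-up numbers. With the default up_threshold of 80 the divisor is 70, i.e. "10 points under the threshold".)

#include <stdio.h>

int main(void)
{
	unsigned int total_ticks = 100, idle_ticks = 70;	/* 30% load */
	unsigned int cur = 2000000;				/* current freq, kHz */
	unsigned int up_threshold = 80;

	unsigned int load = ((total_ticks - idle_ticks) * 100) / total_ticks;
	unsigned int freq_next = (load * cur) / (up_threshold - 10);

	/* prints 857142 kHz: <= 95% of 2000000, so the governor steps down */
	printf("freq_next = %u kHz\n", freq_next);
	return 0;
}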
@@ -355,15 +355,15 @@ static void dbs_check_cpu(int cpu)
 }

 static void do_dbs_timer(void *data)
 {
 	int i;
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
 }

 static inline void dbs_timer_init(void)
 {
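(Not part of the patch: do_dbs_timer() above re-arms itself, checking every online CPU and then queueing the next run sampling_rate microseconds later. A rough standalone illustration of the usecs_to_jiffies() conversion it relies on — real kernels round up and guard against overflow, and HZ is an example value here.)

#include <stdio.h>

#define HZ_EXAMPLE 250	/* 1 jiffy = 4 ms at HZ=250 */

static unsigned long approx_usecs_to_jiffies(unsigned long usecs)
{
	return (usecs * HZ_EXAMPLE) / 1000000;
}

int main(void)
{
	/* a 100 ms sampling rate re-queues the work ~25 ticks later */
	printf("%lu jiffies\n", approx_usecs_to_jiffies(100000));
	return 0;
}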
@@ -390,7 +390,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

 	switch (event) {
 	case CPUFREQ_GOV_START:
 		if ((!cpu_online(cpu)) ||
 				(!policy->cur))
 			return -EINVAL;

@@ -399,13 +399,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return -EINVAL;
 		if (this_dbs_info->enable) /* Already enabled */
 			break;

 		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;

 			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 			j_dbs_info->prev_cpu_idle_down
 					= j_dbs_info->prev_cpu_idle_up;
@@ -435,7 +435,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

 			dbs_timer_init();
 		}

 		mutex_unlock(&dbs_mutex);
 		break;

@@ -448,9 +448,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		 * Stop the timerschedule work, when this governor
 		 * is used for first time
 		 */
 		if (dbs_enable == 0)
 			dbs_timer_exit();

 		mutex_unlock(&dbs_mutex);

 		break;
@@ -460,11 +460,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}
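(Not part of the patch: the CPUFREQ_GOV_LIMITS case above is a clamp — if the running frequency has fallen outside the new [min, max] policy bounds, it is pulled back to the nearest bound. Reduced to its arithmetic, with illustrative names:)

static unsigned int example_clamp_freq(unsigned int cur,
		unsigned int min, unsigned int max)
{
	if (cur > max)
		return max;	/* RELATION_H: highest supported freq <= max */
	if (cur < min)
		return min;	/* RELATION_L: lowest supported freq >= min */
	return cur;		/* already within the policy bounds */
}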