author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-16 21:30:26 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-16 21:30:26 -0500
commit		4c5cdb1e1f2a502069f57a60b5c6b97b8106c73c
tree		1067a3ce7dd39f3ac3aa3b961b1f0612507f694e
parent		8c0863403f109a43d7000b4646da4818220d501f
parent		18a7247d1bb2e2dcbab628d7e786d03df5bf1eed
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Fix up whitespace in conservative governor.
  [CPUFREQ] Make cpufreq_conservative handle out-of-sync events properly
  [CPUFREQ] architectural pstate driver for powernow-k8
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	153
1 file changed, 90 insertions, 63 deletions
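Context for the second change listed above: the conservative governor keeps its own idea of the current frequency in requested_freq, and that bookkeeping drifts if anything else (another governor, a thermal or firmware event) changes the frequency behind its back. The patch addresses this by registering a cpufreq transition notifier so the governor is told about every completed transition. The fragment below is a minimal illustrative sketch of that notifier pattern, not part of the patch; the names example_transition_notifier and example_notifier_block are made up here, while the real hook added by the diff is dbs_cpufreq_notifier further down.

#include <linux/cpufreq.h>
#include <linux/notifier.h>

/* Called around every frequency transition (val is CPUFREQ_PRECHANGE
 * or CPUFREQ_POSTCHANGE); data points to a struct cpufreq_freqs
 * describing the old and new frequency for freq->cpu. */
static int example_transition_notifier(struct notifier_block *nb,
				       unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	/* resync any cached frequency with freq->new here */
	(void)freq;
	return 0;
}

static struct notifier_block example_notifier_block = {
	.notifier_call = example_transition_notifier,
};

/* On governor start:
 *	cpufreq_register_notifier(&example_notifier_block,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 * and on the last stop:
 *	cpufreq_unregister_notifier(&example_notifier_block,
 *				    CPUFREQ_TRANSITION_NOTIFIER);
 */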
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4bd33ce8a6f3..1bba99747f5b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -37,17 +37,17 @@
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
 
 /*
  * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
  * latency of the processor. The governor will work on any processor with
  * transition latency <= 10mS, using appropriate sampling
  * rate.
  * For CPUs with transition latency > 10mS (mostly drivers
  * with CPUFREQ_ETERNAL), this governor will not work.
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO		(2)
 /* for correct statistics, we need at least 10 ticks between each measure */
 #define MIN_STAT_SAMPLING_RATE \
@@ -63,12 +63,12 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
 	struct cpufreq_policy *cur_policy;
 	unsigned int prev_cpu_idle_up;
 	unsigned int prev_cpu_idle_down;
 	unsigned int enable;
 	unsigned int down_skip;
 	unsigned int requested_freq;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -82,24 +82,24 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
  * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
 struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int down_threshold;
 	unsigned int ignore_nice;
 	unsigned int freq_step;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 	.ignore_nice = 0,
 	.freq_step = 5,
 };
 
 static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -109,13 +109,34 @@ static inline unsigned int get_cpu_idle_time(unsigned int cpu)
 	if (dbs_tuners_ins.ignore_nice)
 		add_nice = kstat_cpu(cpu).cpustat.nice;
 
 	ret = kstat_cpu(cpu).cpustat.idle +
 		kstat_cpu(cpu).cpustat.iowait +
 		add_nice;
 
 	return ret;
 }
 
+/* keep track of frequency transitions */
+static int
+dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+		     void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+							freq->cpu);
+
+	if (!this_dbs_info->enable)
+		return 0;
+
+	this_dbs_info->requested_freq = freq->new;
+
+	return 0;
+}
+
+static struct notifier_block dbs_cpufreq_notifier_block = {
+	.notifier_call = dbs_cpufreq_notifier
+};
+
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
@@ -127,8 +148,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 	return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
 }
 
 #define define_one_ro(_name)		\
 static struct freq_attr _name =	\
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -148,7 +169,7 @@ show_one(down_threshold, down_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(freq_step, freq_step);
 
 static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -164,7 +185,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -183,7 +204,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -202,7 +223,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
 static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
 {
 	unsigned int input;
@@ -228,16 +249,16 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	int ret;
 
 	unsigned int j;
 
-	ret = sscanf (buf, "%u", &input);
-	if ( ret != 1 )
+	ret = sscanf(buf, "%u", &input);
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 1 )
+	if (input > 1)
 		input = 1;
 
 	mutex_lock(&dbs_mutex);
-	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
 		mutex_unlock(&dbs_mutex);
 		return count;
 	}
@@ -261,14 +282,14 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 	unsigned int input;
 	int ret;
 
-	ret = sscanf (buf, "%u", &input);
+	ret = sscanf(buf, "%u", &input);
 
-	if ( ret != 1 )
+	if (ret != 1)
 		return -EINVAL;
 
-	if ( input > 100 )
+	if (input > 100)
 		input = 100;
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
 	mutex_lock(&dbs_mutex);
@@ -322,18 +343,18 @@ static void dbs_check_cpu(int cpu)
 
 	policy = this_dbs_info->cur_policy;
 
 	/*
 	 * The default safe range is 20% to 80%
 	 * Every sampling_rate, we check
 	 *	- If current idle time is less than 20%, then we try to
 	 *	  increase frequency
 	 * Every sampling_rate*sampling_down_factor, we check
 	 *	- If current idle time is more than 80%, then we try to
 	 *	  decrease frequency
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
 	 * 5% (default) of max_frequency
 	 */
 
 	/* Check for frequency increase */
@@ -361,13 +382,13 @@ static void dbs_check_cpu(int cpu)
 	/* if we are already at full speed then break out early */
 	if (this_dbs_info->requested_freq == policy->max)
 		return;
 
 	freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
 	/* max freq cannot be less than 100. But who knows.... */
 	if (unlikely(freq_step == 0))
 		freq_step = 5;
 
 	this_dbs_info->requested_freq += freq_step;
 	if (this_dbs_info->requested_freq > policy->max)
 		this_dbs_info->requested_freq = policy->max;
@@ -427,15 +448,15 @@ static void dbs_check_cpu(int cpu)
 }
 
 static void do_dbs_timer(struct work_struct *work)
 {
 	int i;
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
 {
@@ -462,13 +483,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
-		if ((!cpu_online(cpu)) ||
-				(!policy->cur))
+		if ((!cpu_online(cpu)) || (!policy->cur))
 			return -EINVAL;
 
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
 		mutex_lock(&dbs_mutex);
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -481,7 +501,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
 			j_dbs_info->prev_cpu_idle_down
 				= j_dbs_info->prev_cpu_idle_up;
@@ -511,8 +531,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 
 			dbs_timer_init();
+			cpufreq_register_notifier(
+					&dbs_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
 		}
 
 		mutex_unlock(&dbs_mutex);
 		break;
 
@@ -525,9 +548,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		 * Stop the timerschedule work, when this governor
 		 * is used for first time
 		 */
-		if (dbs_enable == 0)
+		if (dbs_enable == 0) {
 			dbs_timer_exit();
+			cpufreq_unregister_notifier(
+					&dbs_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		}
 
 		mutex_unlock(&dbs_mutex);
 
 		break;
@@ -537,11 +564,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->max, CPUFREQ_RELATION_H);
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
 		break;
 	}