aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Jones <davej@redhat.com>2005-05-31 22:03:48 -0400
committerDave Jones <davej@redhat.com>2005-05-31 22:03:48 -0400
commit1206aaac285904e3e3995eecbf4129b6555a8973 (patch)
tree546fc363a58a83e0d5b8990cc3213fcbdf87c07c
parentc11420a616039e2181e4ecbffb4d125d39e6877d (diff)
[CPUFREQ] Allow ondemand stepping to be changed by user.
Adds support so that the cpufreq change stepping is no longer fixed at 5% and can be changed dynamically by the user. Signed-off-by: Alexander Clouter <alex-kernel@digriz.org.uk> Signed-off-by: Dave Jones <davej@redhat.com>
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 42
1 file changed, 36 insertions, 6 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6dc83808e590..056591612467 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -79,6 +79,7 @@ struct dbs_tuners {
79 unsigned int up_threshold; 79 unsigned int up_threshold;
80 unsigned int down_threshold; 80 unsigned int down_threshold;
81 unsigned int ignore_nice; 81 unsigned int ignore_nice;
82 unsigned int freq_step;
82}; 83};
83 84
84static struct dbs_tuners dbs_tuners_ins = { 85static struct dbs_tuners dbs_tuners_ins = {
@@ -117,6 +118,7 @@ show_one(sampling_down_factor, sampling_down_factor);
117show_one(up_threshold, up_threshold); 118show_one(up_threshold, up_threshold);
118show_one(down_threshold, down_threshold); 119show_one(down_threshold, down_threshold);
119show_one(ignore_nice, ignore_nice); 120show_one(ignore_nice, ignore_nice);
121show_one(freq_step, freq_step);
120 122
121static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused, 123static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
122 const char *buf, size_t count) 124 const char *buf, size_t count)
@@ -235,6 +237,29 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
235 return count; 237 return count;
236} 238}
237 239
240static ssize_t store_freq_step(struct cpufreq_policy *policy,
241 const char *buf, size_t count)
242{
243 unsigned int input;
244 int ret;
245
246 ret = sscanf (buf, "%u", &input);
247
248 if ( ret != 1 )
249 return -EINVAL;
250
251 if ( input > 100 )
252 input = 100;
253
254 /* no need to test here if freq_step is zero as the user might actually
255 * want this, they would be crazy though :) */
256 down(&dbs_sem);
257 dbs_tuners_ins.freq_step = input;
258 up(&dbs_sem);
259
260 return count;
261}
262
238#define define_one_rw(_name) \ 263#define define_one_rw(_name) \
239static struct freq_attr _name = \ 264static struct freq_attr _name = \
240__ATTR(_name, 0644, show_##_name, store_##_name) 265__ATTR(_name, 0644, show_##_name, store_##_name)
@@ -244,6 +269,7 @@ define_one_rw(sampling_down_factor);
244define_one_rw(up_threshold); 269define_one_rw(up_threshold);
245define_one_rw(down_threshold); 270define_one_rw(down_threshold);
246define_one_rw(ignore_nice); 271define_one_rw(ignore_nice);
272define_one_rw(freq_step);
247 273
248static struct attribute * dbs_attributes[] = { 274static struct attribute * dbs_attributes[] = {
249 &sampling_rate_max.attr, 275 &sampling_rate_max.attr,
@@ -253,6 +279,7 @@ static struct attribute * dbs_attributes[] = {
253 &up_threshold.attr, 279 &up_threshold.attr,
254 &down_threshold.attr, 280 &down_threshold.attr,
255 &ignore_nice.attr, 281 &ignore_nice.attr,
282 &freq_step.attr,
256 NULL 283 NULL
257}; 284};
258 285
@@ -291,7 +318,7 @@ static void dbs_check_cpu(int cpu)
291 * 318 *
292 * Any frequency increase takes it to the maximum frequency. 319 * Any frequency increase takes it to the maximum frequency.
293 * Frequency reduction happens at minimum steps of 320 * Frequency reduction happens at minimum steps of
294 * 5% of max_frequency 321 * 5% (default) of max_frequency
295 */ 322 */
296 323
297 /* Check for frequency increase */ 324 /* Check for frequency increase */
@@ -390,18 +417,20 @@ static void dbs_check_cpu(int cpu)
390 usecs_to_jiffies(freq_down_sampling_rate); 417 usecs_to_jiffies(freq_down_sampling_rate);
391 418
392 if (idle_ticks > down_idle_ticks ) { 419 if (idle_ticks > down_idle_ticks ) {
393 /* if we are already at the lowest speed then break out early */ 420 /* if we are already at the lowest speed then break out early
394 if (policy->cur == policy->min) 421 * or if we 'cannot' reduce the speed as the user might want
422 * freq_step to be zero */
423 if (policy->cur == policy->min || dbs_tuners_ins.freq_step == 0)
395 return; 424 return;
396 425
397 freq_down_step = (5 * policy->max) / 100; 426 freq_down_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
398 427
399 /* max freq cannot be less than 100. But who knows.... */ 428 /* max freq cannot be less than 100. But who knows.... */
400 if (unlikely(freq_down_step == 0)) 429 if (unlikely(freq_down_step == 0))
401 freq_down_step = 5; 430 freq_down_step = 5;
402 431
403 __cpufreq_driver_target(policy, 432 __cpufreq_driver_target(policy,
404 policy->cur - freq_down_step, 433 policy->cur - freq_down_step,
405 CPUFREQ_RELATION_H); 434 CPUFREQ_RELATION_H);
406 return; 435 return;
407 } 436 }
@@ -486,6 +515,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
486 DEF_SAMPLING_RATE_LATENCY_MULTIPLIER; 515 DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
487 dbs_tuners_ins.sampling_rate = def_sampling_rate; 516 dbs_tuners_ins.sampling_rate = def_sampling_rate;
488 dbs_tuners_ins.ignore_nice = 0; 517 dbs_tuners_ins.ignore_nice = 0;
518 dbs_tuners_ins.freq_step = 5;
489 519
490 dbs_timer_init(); 520 dbs_timer_init();
491 } 521 }