author    Dave Jones <davej@redhat.com>    2005-05-31 22:03:47 -0400
committer Dave Jones <davej@redhat.com>    2005-05-31 22:03:47 -0400
commit    3d5ee9e55d13de28d2fa58d6e13f2e4d3a5f8b1a (patch)
tree      8a19d6049bbffe4533724684dddb9e6c0856aba0
parent    b9170836d1aa4ded7cc1ac1cb8fbc7867061c98c (diff)
[CPUFREQ] Add support to cpufreq_ondemand to ignore 'nice' cpu time
Signed-off-by: Alexander Clouter <alex-kernel@digriz.org.uk>
Signed-off-by: Dave Jones <davej@redhat.com>
 drivers/cpufreq/cpufreq_ondemand.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 62 insertions(+), 4 deletions(-)
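The heart of the patch is the idle-time bookkeeping: while the new ignore_nice tunable is 0 (the default set when the governor starts), ticks accounted to 'nice' tasks are folded into the idle count, so niced background jobs no longer push the frequency up. The standalone C sketch below only illustrates that rule; the helper name and the sample tick values are invented for the example and are not part of the patch.

/*
 * Illustration only (userspace, not kernel code): how the patch decides
 * what counts as idle time. With ignore_nice == 0, 'nice' ticks are
 * treated as idle; with ignore_nice == 1 they count as busy time.
 */
#include <stdio.h>

static unsigned int idle_ticks_for(unsigned int idle, unsigned int iowait,
				   unsigned int nice, unsigned int ignore_nice)
{
	unsigned int total = idle + iowait;

	if (ignore_nice == 0)	/* same test the patch adds in dbs_check_cpu() */
		total += nice;
	return total;
}

int main(void)
{
	/* made-up sample: 700 idle, 50 iowait, 200 nice ticks in a window */
	printf("ignore_nice=0 -> %u idle ticks\n", idle_ticks_for(700, 50, 200, 0));
	printf("ignore_nice=1 -> %u idle ticks\n", idle_ticks_for(700, 50, 200, 1));
	return 0;
}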
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 84c658822a10..7d7244314ac9 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -78,6 +78,7 @@ struct dbs_tuners {
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int down_threshold;
+	unsigned int ignore_nice;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
@@ -115,6 +116,7 @@ show_one(sampling_rate, sampling_rate);
 show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
 show_one(down_threshold, down_threshold);
+show_one(ignore_nice, ignore_nice);
 
 static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
@@ -193,6 +195,46 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
+static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
+		const char *buf, size_t count)
+{
+	unsigned int input;
+	int ret;
+
+	unsigned int j;
+
+	ret = sscanf (buf, "%u", &input);
+	if ( ret != 1 )
+		return -EINVAL;
+
+	if ( input > 1 )
+		input = 1;
+
+	down(&dbs_sem);
+	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+		up(&dbs_sem);
+		return count;
+	}
+	dbs_tuners_ins.ignore_nice = input;
+
+	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+	for_each_cpu_mask(j, policy->cpus) {
+		struct cpu_dbs_info_s *j_dbs_info;
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info->cur_policy = policy;
+
+		j_dbs_info->prev_cpu_idle_up =
+			kstat_cpu(j).cpustat.idle +
+			kstat_cpu(j).cpustat.iowait +
+			( !dbs_tuners_ins.ignore_nice
+			? kstat_cpu(j).cpustat.nice : 0 );
+		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+	}
+	up(&dbs_sem);
+
+	return count;
+}
+
 #define define_one_rw(_name) \
 static struct freq_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
@@ -201,6 +243,7 @@ define_one_rw(sampling_rate);
 define_one_rw(sampling_down_factor);
 define_one_rw(up_threshold);
 define_one_rw(down_threshold);
+define_one_rw(ignore_nice);
 
 static struct attribute * dbs_attributes[] = {
 	&sampling_rate_max.attr,
@@ -209,6 +252,7 @@ static struct attribute * dbs_attributes[] = {
 	&sampling_down_factor.attr,
 	&up_threshold.attr,
 	&down_threshold.attr,
+	&ignore_nice.attr,
 	NULL
 };
 
@@ -253,6 +297,9 @@ static void dbs_check_cpu(int cpu)
 	/* Check for frequency increase */
 	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
 		kstat_cpu(cpu).cpustat.iowait;
+	/* consider 'nice' tasks as 'idle' time too if required */
+	if (dbs_tuners_ins.ignore_nice == 0)
+		total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
 	idle_ticks = total_idle_ticks -
 		this_dbs_info->prev_cpu_idle_up;
 	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -269,6 +316,9 @@ static void dbs_check_cpu(int cpu)
 		/* Check for frequency increase */
 		total_idle_ticks = kstat_cpu(j).cpustat.idle +
 			kstat_cpu(j).cpustat.iowait;
+		/* consider 'nice' too? */
+		if (dbs_tuners_ins.ignore_nice == 0)
+			total_idle_ticks += kstat_cpu(j).cpustat.nice;
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_up;
 		j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -297,6 +347,9 @@ static void dbs_check_cpu(int cpu)
 
 	total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
 		kstat_cpu(cpu).cpustat.iowait;
+	/* consider 'nice' too? */
+	if (dbs_tuners_ins.ignore_nice == 0)
+		total_idle_ticks += kstat_cpu(cpu).cpustat.nice;
 	idle_ticks = total_idle_ticks -
 		this_dbs_info->prev_cpu_idle_down;
 	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -312,6 +365,9 @@ static void dbs_check_cpu(int cpu)
 		/* Check for frequency increase */
 		total_idle_ticks = kstat_cpu(j).cpustat.idle +
 			kstat_cpu(j).cpustat.iowait;
+		/* consider 'nice' too? */
+		if (dbs_tuners_ins.ignore_nice == 0)
+			total_idle_ticks += kstat_cpu(j).cpustat.nice;
 		tmp_idle_ticks = total_idle_ticks -
 			j_dbs_info->prev_cpu_idle_down;
 		j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -397,10 +453,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			j_dbs_info->prev_cpu_idle_up =
 				kstat_cpu(j).cpustat.idle +
-				kstat_cpu(j).cpustat.iowait;
-			j_dbs_info->prev_cpu_idle_down =
-				kstat_cpu(j).cpustat.idle +
-				kstat_cpu(j).cpustat.iowait;
+				kstat_cpu(j).cpustat.iowait +
+				( !dbs_tuners_ins.ignore_nice
+				? kstat_cpu(j).cpustat.nice : 0 );
+			j_dbs_info->prev_cpu_idle_down
+				= j_dbs_info->prev_cpu_idle_up;
 		}
 		this_dbs_info->enable = 1;
 		sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -420,6 +477,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			def_sampling_rate = (latency / 1000) *
 				DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
+			dbs_tuners_ins.ignore_nice = 0;
 
 			dbs_timer_init();
 		}
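Because the attribute group is registered on policy->kobj, the new ignore_nice file should show up next to the other ondemand tunables in each policy's cpufreq sysfs directory. The snippet below is a hedged usage sketch, not part of the patch: the path assumes the governor's attribute group is named "ondemand" and that the ondemand governor is active on cpu0.

/*
 * Usage sketch: enable ignore_nice from userspace. The sysfs path is an
 * assumption (group name "ondemand", governor active on cpu0); adjust it
 * for the running kernel.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/cpu0/cpufreq/ondemand/ignore_nice";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen ignore_nice");
		return 1;
	}
	/* store_ignore_nice() clamps anything greater than 1 down to 1 */
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}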