author	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>	2009-01-23 09:25:02 -0500
committer	Dave Jones <davej@redhat.com>	2009-02-05 12:25:26 -0500
commit	1ca3abdb6a4b87246b00292f048acd344325fd12 (patch)
tree	787f3d255676da7110b4e09b6fe91ce50286e888 /drivers
parent	eda58a85ec3fc05855a26654d97a2b53f0e715b9 (diff)
[CPUFREQ] Make ignore_nice_load setting of ondemand work as expected.
The ondemand micro-accounting of idle time changes broke the ignore_nice_load
sysfs setting due to a thinko in the code.

The bug entry:
http://bugzilla.kernel.org/show_bug.cgi?id=12310

Reported-by: Jim Bray <jimsantelmo@gmail.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
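For context on what the patch restores: when ignore_nice_load is set, time consumed by nice (positive-nice) tasks is meant to count as idle, so the ondemand governor does not ramp the frequency on behalf of low-priority work. With this change, get_cpu_idle_time_jiffy() always charges nice time as busy, and dbs_check_cpu() adds the per-sample nice delta back onto idle time only when the flag is set. Below is a minimal illustrative C sketch of that accounting, not kernel code; struct sample and sample_load() are hypothetical names introduced here only to show the intended arithmetic.

struct sample {
	unsigned long long wall_time;	/* usecs elapsed in the sampling window     */
	unsigned long long idle_time;	/* usecs spent idle, nice time not included */
	unsigned long long nice_delta;	/* usecs of nice time in the same window    */
	unsigned int ignore_nice_load;	/* the sysfs tunable this patch fixes       */
};

static unsigned int sample_load(const struct sample *s)
{
	unsigned long long idle = s->idle_time;

	/*
	 * With ignore_nice_load set, nice time is treated as idle so the
	 * governor does not raise the frequency for low-priority tasks.
	 */
	if (s->ignore_nice_load)
		idle += s->nice_delta;

	if (!s->wall_time || s->wall_time < idle)
		return 0;

	/* busy percentage over the sampling window */
	return (unsigned int)(100 * (s->wall_time - idle) / s->wall_time);
}

In the patched kernel the same adjustment is carried out with cputime64_sub()/jiffies_to_usecs() against the per-CPU prev_cpu_nice bookkeeping visible in the hunks below.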
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c | 47
1 file changed, 25 insertions(+), 22 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a2b036c9389..6f45b1658a67 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -117,11 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
 	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
 	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-
-	if (!dbs_tuners_ins.ignore_nice) {
-		busy_time = cputime64_add(busy_time,
-				kstat_cpu(cpu).cpustat.nice);
-	}
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
 
 	idle_time = cputime64_sub(cur_wall_time, busy_time);
 	if (wall)
@@ -137,23 +133,6 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
 	if (idle_time == -1ULL)
 		return get_cpu_idle_time_jiffy(cpu, wall);
 
-	if (dbs_tuners_ins.ignore_nice) {
-		cputime64_t cur_nice;
-		unsigned long cur_nice_jiffies;
-		struct cpu_dbs_info_s *dbs_info;
-
-		dbs_info = &per_cpu(cpu_dbs_info, cpu);
-		cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
-					 dbs_info->prev_cpu_nice);
-		/*
-		 * Assumption: nice time between sampling periods will be
-		 * less than 2^32 jiffies for 32 bit sys
-		 */
-		cur_nice_jiffies = (unsigned long)
-					cputime64_to_jiffies64(cur_nice);
-		dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
-		return idle_time + jiffies_to_usecs(cur_nice_jiffies);
-	}
 	return idle_time;
 }
 
@@ -319,6 +298,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		dbs_info = &per_cpu(cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
+		if (dbs_tuners_ins.ignore_nice)
+			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -419,6 +401,23 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 						j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
+		if (dbs_tuners_ins.ignore_nice) {
+			cputime64_t cur_nice;
+			unsigned long cur_nice_jiffies;
+
+			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+					 j_dbs_info->prev_cpu_nice);
+			/*
+			 * Assumption: nice time between sampling periods will
+			 * be less than 2^32 jiffies for 32 bit sys
+			 */
+			cur_nice_jiffies = (unsigned long)
+					cputime64_to_jiffies64(cur_nice);
+
+			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			idle_time += jiffies_to_usecs(cur_nice_jiffies);
+		}
+
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
@@ -575,6 +574,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&j_dbs_info->prev_cpu_wall);
+			if (dbs_tuners_ins.ignore_nice) {
+				j_dbs_info->prev_cpu_nice =
+						kstat_cpu(j).cpustat.nice;
+			}
 		}
 		this_dbs_info->cpu = cpu;
 		/*