author		Linus Torvalds <torvalds@linux-foundation.org>	2009-02-09 16:58:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-02-09 16:58:22 -0500
commit		6707fbb56c8fd3121e334291d170934bcaca2e7f (patch)
tree		328a4db08d1e7651f152bc11f2a48176f02a0505 /drivers
parent		896abeb743579fc8be0d16d15d6768a158a3a109 (diff)
parent		732553e567c2700ba5b9bccc6ec885c75779a94b (diff)
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] powernow-k8: Get transition latency from ACPI _PSS table
  [CPUFREQ] Make ignore_nice_load setting of ondemand work as expected.
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	47
1 file changed, 25 insertions(+), 22 deletions(-)
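The ondemand fix in this merge changes where nice time is accounted: get_cpu_idle_time_jiffy() now always counts nice as busy, and when ignore_nice is set dbs_check_cpu() adds the per-period nice delta back onto the idle time, so writing the tunable takes effect on the next sample instead of depending on stale state inside the idle-time helper. Below is a minimal standalone sketch of that arithmetic, assuming all times are already in microseconds; struct sample, sample_load() and the numbers are illustrative only and are not part of the kernel code in the diff that follows.

/* Standalone sketch (not kernel code) of the accounting scheme this patch
 * moves into the governor's sampling loop. */
#include <stdint.h>
#include <stdio.h>

struct sample {
        uint64_t wall;   /* wall-clock time for this CPU, in usecs  */
        uint64_t idle;   /* idle time (nice always counted as busy) */
        uint64_t nice;   /* cumulative nice time, in usecs          */
};

/* Load in percent for one sampling period, optionally ignoring nice load. */
static unsigned int sample_load(const struct sample *prev,
                                const struct sample *cur,
                                int ignore_nice)
{
        uint64_t wall_time = cur->wall - prev->wall;
        uint64_t idle_time = cur->idle - prev->idle;

        /* With ignore_nice set, time spent in niced tasks is treated as
         * idle: the per-period nice delta is added back onto idle_time,
         * mirroring what dbs_check_cpu() does in the diff below. */
        if (ignore_nice)
                idle_time += cur->nice - prev->nice;

        if (!wall_time || wall_time < idle_time)
                return 0;

        return (unsigned int)(100 * (wall_time - idle_time) / wall_time);
}

int main(void)
{
        struct sample prev = { .wall = 0,      .idle = 0,     .nice = 0 };
        struct sample cur  = { .wall = 100000, .idle = 20000, .nice = 50000 };

        /* 80% load when nice time counts as busy, 30% when it is ignored. */
        printf("load = %u%%\n", sample_load(&prev, &cur, 0));
        printf("load = %u%%\n", sample_load(&prev, &cur, 1));
        return 0;
}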
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 6a2b036c9389..6f45b1658a67 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -117,11 +117,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
         busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
         busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
         busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-
-        if (!dbs_tuners_ins.ignore_nice) {
-                busy_time = cputime64_add(busy_time,
-                                kstat_cpu(cpu).cpustat.nice);
-        }
+        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
 
         idle_time = cputime64_sub(cur_wall_time, busy_time);
         if (wall)
@@ -137,23 +133,6 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
         if (idle_time == -1ULL)
                 return get_cpu_idle_time_jiffy(cpu, wall);
 
-        if (dbs_tuners_ins.ignore_nice) {
-                cputime64_t cur_nice;
-                unsigned long cur_nice_jiffies;
-                struct cpu_dbs_info_s *dbs_info;
-
-                dbs_info = &per_cpu(cpu_dbs_info, cpu);
-                cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
-                                dbs_info->prev_cpu_nice);
-                /*
-                 * Assumption: nice time between sampling periods will be
-                 * less than 2^32 jiffies for 32 bit sys
-                 */
-                cur_nice_jiffies = (unsigned long)
-                                cputime64_to_jiffies64(cur_nice);
-                dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
-                return idle_time + jiffies_to_usecs(cur_nice_jiffies);
-        }
         return idle_time;
 }
 
@@ -319,6 +298,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
                 dbs_info = &per_cpu(cpu_dbs_info, j);
                 dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                 &dbs_info->prev_cpu_wall);
+                if (dbs_tuners_ins.ignore_nice)
+                        dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
         }
         mutex_unlock(&dbs_mutex);
 
@@ -419,6 +401,23 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                                 j_dbs_info->prev_cpu_idle);
                 j_dbs_info->prev_cpu_idle = cur_idle_time;
 
+                if (dbs_tuners_ins.ignore_nice) {
+                        cputime64_t cur_nice;
+                        unsigned long cur_nice_jiffies;
+
+                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+                                        j_dbs_info->prev_cpu_nice);
+                        /*
+                         * Assumption: nice time between sampling periods will
+                         * be less than 2^32 jiffies for 32 bit sys
+                         */
+                        cur_nice_jiffies = (unsigned long)
+                                        cputime64_to_jiffies64(cur_nice);
+
+                        j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
+                }
+
                 if (unlikely(!wall_time || wall_time < idle_time))
                         continue;
 
@@ -575,6 +574,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                 j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                 &j_dbs_info->prev_cpu_wall);
+                if (dbs_tuners_ins.ignore_nice) {
+                        j_dbs_info->prev_cpu_nice =
+                                        kstat_cpu(j).cpustat.nice;
+                }
         }
         this_dbs_info->cpu = cpu;
         /*
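For context, the store_ignore_nice_load() change above resets prev_cpu_nice for every CPU when the tunable is written, so the first sample after a toggle is measured against a fresh baseline. A hedged usage sketch follows; the sysfs path is an assumption about where the ondemand tunable lives on kernels of this era and is not taken from the diff.

/* Hypothetical usage sketch: toggling the tunable that
 * store_ignore_nice_load() implements.  Adjust the path for your system. */
#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/devices/system/cpu/cpu0/cpufreq/ondemand/ignore_nice_load";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* "1": nice load is ignored; the governor also resets prev_cpu_nice
         * so the very next sample starts from a clean baseline. */
        fputs("1\n", f);
        fclose(f);
        return 0;
}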