author    Venki Pallipadi <venkatesh.pallipadi@intel.com>    2007-06-20 17:26:24 -0400
committer Dave Jones <davej@redhat.com>    2007-06-21 12:57:53 -0400
commit    ea48761519bd40d7a881c587b5f3177664b2987e (patch)
tree      76694d9cd379490be903d8a73fa7588ad504197e
parent    0af99b13c9f323e658b4f1d69a1ccae7d6f3f80a (diff)
[CPUFREQ] ondemand: fix tickless accounting and software coordination bug
With a tickless kernel and software coordination of P-states, ondemand can look at the wrong idle statistics. This can happen when ondemand sampling runs on CPU 0 and, due to software coordination, the sampling also looks at the utilization of CPU 1. If CPU 1 is in the tickless state at that moment, its idle statistics will not be up to date, and CPU 0 will think CPU 1 has been idle for less time than it actually has. This can be resolved by looking at the busy times of all the CPUs, which are accurate even with tickless operation, and using them to determine idle time indirectly (total time - busy time).

Thanks to Arjan for originally reporting the ondemand bug on a Lenovo T61.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
-rw-r--r--    drivers/cpufreq/cpufreq_ondemand.c    25
1 file changed, 18 insertions(+), 7 deletions(-)
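Before the diff itself, a minimal user-space sketch of the same accounting idea may help: rather than trusting the idle/iowait counters, which can lag on a tickless CPU, it sums the busy columns of /proc/stat and derives idle time as total (wall) time minus busy time. This is an illustration only, not kernel code; the /proc/stat parsing, the one-second sampling interval, and the busy_ticks() helper are assumptions made for the sketch.

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/*
 * Sum the "busy" columns of the aggregate cpu line in /proc/stat, the
 * same set the patch below adds up in get_cpu_idle_time():
 * user + nice + system + irq + softirq + steal.
 */
static unsigned long long busy_ticks(void)
{
	unsigned long long usr, nic, sys, idl, iow, irq, sirq, stl;
	unsigned long long busy = 0;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 0;
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu",
		   &usr, &nic, &sys, &idl, &iow, &irq, &sirq, &stl) == 8)
		busy = usr + nic + sys + irq + sirq + stl;
	fclose(f);
	return busy;
}

int main(void)
{
	long hz = sysconf(_SC_CLK_TCK);            /* wall-clock ticks per second */
	long ncpu = sysconf(_SC_NPROCESSORS_ONLN); /* aggregate line covers all CPUs */
	struct timespec t0, t1;
	unsigned long long b0, b1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	b0 = busy_ticks();
	sleep(1);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	b1 = busy_ticks();

	/* total time elapsed, expressed in scheduler ticks across all CPUs */
	double wall_ticks = ((t1.tv_sec - t0.tv_sec) +
			     (t1.tv_nsec - t0.tv_nsec) / 1e9) * hz * ncpu;

	/* idle derived indirectly: total time minus busy time, the same
	 * subtraction the patch performs with jiffies in the kernel */
	printf("busy %.1f%%, derived idle %.0f ticks\n",
	       (b1 - b0) * 100.0 / wall_ticks, wall_ticks - (b1 - b0));
	return 0;
}

The kernel patch below does the equivalent with cputime64_t arithmetic on the per-CPU kstat counters and the jiffies counter, rather than /proc/stat.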
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index dc6f357390e2..e794527e4925 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -96,15 +96,25 @@ static struct dbs_tuners {
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-	cputime64_t retval;
+	cputime64_t idle_time;
+	cputime64_t cur_jiffies;
+	cputime64_t busy_time;
 
-	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-			kstat_cpu(cpu).cpustat.iowait);
+	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+			kstat_cpu(cpu).cpustat.system);
 
-	if (dbs_tuners_ins.ignore_nice)
-		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
 
-	return retval;
+	if (!dbs_tuners_ins.ignore_nice) {
+		busy_time = cputime64_add(busy_time,
+				kstat_cpu(cpu).cpustat.nice);
+	}
+
+	idle_time = cputime64_sub(cur_jiffies, busy_time);
+	return idle_time;
 }
 
 /*
@@ -339,7 +349,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
 	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
 			this_dbs_info->prev_cpu_wall);
-	this_dbs_info->prev_cpu_wall = cur_jiffies;
+	this_dbs_info->prev_cpu_wall = get_jiffies_64();
+
 	if (!total_ticks)
 		return;
 	/*