author		venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>	2008-08-04 14:59:09 -0400
committer	Dave Jones <davej@redhat.com>	2008-10-09 13:52:44 -0400
commit		3430502d356284ff4f7782d75bb01a402fd3d45e (patch)
tree		b656eeb8b58e13e3b9d5a57b60cbf637829b3886 /drivers
parent		c43aa3bd99a67009a167430e80c5fde6f37288d8 (diff)
[CPUFREQ][3/6] cpufreq: get_cpu_idle_time() changes in ondemand for idle-microaccounting
Preparatory changes for doing idle micro-accounting in the ondemand governor.
get_cpu_idle_time() gets an extra parameter and now returns both the idle time
and the wall time that corresponds to the idle-time measurement.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Dave Jones <davej@redhat.com>
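In caller terms, the change replaces the separate get_jiffies_64() wall-clock
read with a wall time returned through the new output parameter, so the idle
and wall samples come from the same instant. A minimal sketch of the new
calling convention (illustrative only, not part of the patch; the field names
follow the per-CPU cpu_dbs_info_s structure used in the diff below):

	/* Illustrative sketch of the caller pattern in dbs_check_cpu(). */
	cputime64_t cur_idle_time, cur_wall_time;
	unsigned int idle_time, wall_time;

	/* one call now yields the idle time and the matching wall time */
	cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

	wall_time = (unsigned int) cputime64_sub(cur_wall_time,
			j_dbs_info->prev_cpu_wall);
	idle_time = (unsigned int) cputime64_sub(cur_idle_time,
			j_dbs_info->prev_cpu_idle);

	/* remember both samples for the next evaluation interval */
	j_dbs_info->prev_cpu_wall = cur_wall_time;
	j_dbs_info->prev_cpu_idle = cur_idle_time;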
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	29
1 file changed, 15 insertions, 14 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 42fcb146ba22..b935092aab21 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -94,13 +94,13 @@ static struct dbs_tuners {
 	.powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
 {
 	cputime64_t idle_time;
-	cputime64_t cur_jiffies;
+	cputime64_t cur_wall_time;
 	cputime64_t busy_time;
 
-	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
 	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
 			kstat_cpu(cpu).cpustat.system);
 
@@ -113,7 +113,10 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 				kstat_cpu(cpu).cpustat.nice);
 	}
 
-	idle_time = cputime64_sub(cur_jiffies, busy_time);
+	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	if (wall)
+		*wall = cur_wall_time;
+
 	return idle_time;
 }
 
@@ -277,8 +280,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
 		dbs_info = &per_cpu(cpu_dbs_info, j);
-		dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-		dbs_info->prev_cpu_wall = get_jiffies_64();
+		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&dbs_info->prev_cpu_wall);
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -368,21 +371,19 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		int freq_avg;
 
 		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
 		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
 				j_dbs_info->prev_cpu_wall);
 		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-		cur_idle_time = get_cpu_idle_time(j);
 		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
 				j_dbs_info->prev_cpu_idle);
 		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-		if (unlikely(wall_time <= idle_time ||
-			(cputime_to_msecs(wall_time) <
-			 dbs_tuners_ins.sampling_rate / (2 * 1000)))) {
+		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
-		}
 
 		load = 100 * (wall_time - idle_time) / wall_time;
 
@@ -531,8 +532,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-			j_dbs_info->prev_cpu_wall = get_jiffies_64();
+			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&j_dbs_info->prev_cpu_wall);
 		}
 		this_dbs_info->cpu = cpu;
 		/*