author     Venki Pallipadi <venkatesh.pallipadi@intel.com>  2008-01-30 07:30:04 -0500
committer  Ingo Molnar <mingo@elte.hu>                      2008-01-30 07:30:04 -0500
commit     6378ddb592158db4b42197f1bc8666228800e379
tree       f9b1e671dfd12fb221f6140dd231ccb14cd9f27e /kernel
parent     bbe4d18ac2e058c56adb0cd71f49d9ed3216a405
time: track accurate idle time with tick_sched.idle_sleeptime
Current idle time in kstat is based on jiffies and is coarse grained.
tick_sched.idle_sleeptime makes some attempt to keep track of idle time in a
fine-grained manner, but it does not fully account for the time spent in
interrupts.
Make tick_sched.idle_sleeptime accurate with respect to the time spent
handling interrupts, and also add tick_sched.idle_lastupdate, which keeps
track of the last time idle_sleeptime was updated.
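As a rough illustration of the bookkeeping this introduces, here is a minimal
user-space sketch (not the kernel code itself; the struct and helpers are
simplified stand-ins for the tick_sched fields and the start/stop helpers in
the diff below): idle time accumulates from idle_entrytime up to "now"
whenever the idle period is interrupted or ends, so time spent inside
interrupt handlers stays out of idle_sleeptime.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the fields used in struct tick_sched. */
struct idle_stats {
        uint64_t idle_entrytime;   /* ns: when the current idle period began */
        uint64_t idle_lastupdate;  /* ns: last time idle_sleeptime was updated */
        uint64_t idle_sleeptime;   /* ns: accumulated idle time */
        int idle_active;
};

/* Mirrors the idea of tick_nohz_start_idle(): open (or re-open) an idle period. */
static void start_idle(struct idle_stats *ts, uint64_t now)
{
        if (ts->idle_active) {
                ts->idle_sleeptime += now - ts->idle_entrytime;
                ts->idle_lastupdate = now;
        }
        ts->idle_entrytime = now;
        ts->idle_active = 1;
}

/* Mirrors the idea of tick_nohz_stop_idle(): close the idle period, e.g. at irq_enter(). */
static void stop_idle(struct idle_stats *ts, uint64_t now)
{
        if (ts->idle_active) {
                ts->idle_sleeptime += now - ts->idle_entrytime;
                ts->idle_lastupdate = now;
                ts->idle_active = 0;
        }
}

int main(void)
{
        struct idle_stats ts = { 0, 0, 0, 0 };

        start_idle(&ts, 1000);  /* CPU goes idle at t = 1000 ns          */
        stop_idle(&ts, 4000);   /* interrupt arrives at t = 4000 ns      */
        start_idle(&ts, 4500);  /* handler ran for 500 ns, idle again    */
        stop_idle(&ts, 9000);   /* tick restarts at t = 9000 ns          */

        /* 3000 + 4500 = 7500 ns idle; the 500 ns spent in the handler is excluded. */
        printf("idle_sleeptime = %llu ns\n",
               (unsigned long long)ts.idle_sleeptime);
        return 0;
}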
These statistics will be crucial for the cpufreq-ondemand governor, which can
then shed some of the conservative guard band that it uses today when setting
the frequency. The ondemand changes that use the exact idle time are coming
soon.
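For context, a hedged sketch of how a consumer such as ondemand might use the
get_cpu_idle_time_us() interface added below. The actual ondemand changes are
not part of this commit; sample_load() and its prev_* bookkeeping are
hypothetical names used only for illustration, and the elapsed time is
approximated by the idle-accounting update timestamps the patch exports.

/*
 * Hypothetical kernel-context sampling helper (not in this patch): compute
 * an approximate busy percentage of a CPU over the interval since the
 * previous sample, using the idle time exported by get_cpu_idle_time_us().
 */
static unsigned int sample_load(int cpu, u64 *prev_update, u64 *prev_idle)
{
        u64 cur_update, cur_idle, wall_delta, idle_delta;

        cur_idle = get_cpu_idle_time_us(cpu, &cur_update);

        wall_delta = cur_update - *prev_update; /* us between idle-accounting updates */
        idle_delta = cur_idle - *prev_idle;     /* us of that interval spent idle     */

        *prev_update = cur_update;
        *prev_idle = cur_idle;

        if (!wall_delta)
                return 0;

        /* load in percent: non-idle time over the sampled interval */
        return (unsigned int)((wall_delta - idle_delta) * 100 / wall_delta);
}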
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/softirq.c           |  7
-rw-r--r--   kernel/time/tick-sched.c   | 70
2 files changed, 54 insertions, 23 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8fe1ff40102d..d7837d45419e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -280,9 +280,14 @@ asmlinkage void do_softirq(void)
  */
 void irq_enter(void)
 {
+#ifdef CONFIG_NO_HZ
+        int cpu = smp_processor_id();
+        if (idle_cpu(cpu) && !in_interrupt())
+                tick_nohz_stop_idle(cpu);
+#endif
         __irq_enter();
 #ifdef CONFIG_NO_HZ
-        if (idle_cpu(smp_processor_id()))
+        if (idle_cpu(cpu))
                 tick_nohz_update_jiffies();
 #endif
 }
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 49e12f6a4bab..63f24b550695 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -143,6 +143,44 @@ void tick_nohz_update_jiffies(void)
         local_irq_restore(flags);
 }
 
+void tick_nohz_stop_idle(int cpu)
+{
+        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+        if (ts->idle_active) {
+                ktime_t now, delta;
+                now = ktime_get();
+                delta = ktime_sub(now, ts->idle_entrytime);
+                ts->idle_lastupdate = now;
+                ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+                ts->idle_active = 0;
+        }
+}
+
+static ktime_t tick_nohz_start_idle(int cpu)
+{
+        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+        ktime_t now, delta;
+
+        now = ktime_get();
+        if (ts->idle_active) {
+                delta = ktime_sub(now, ts->idle_entrytime);
+                ts->idle_lastupdate = now;
+                ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+        }
+        ts->idle_entrytime = now;
+        ts->idle_active = 1;
+        return now;
+}
+
+u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
+{
+        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+        *last_update_time = ktime_to_us(ts->idle_lastupdate);
+        return ktime_to_us(ts->idle_sleeptime);
+}
+
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
  *
@@ -155,13 +193,14 @@ void tick_nohz_stop_sched_tick(void)
         unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
         unsigned long rt_jiffies;
         struct tick_sched *ts;
-        ktime_t last_update, expires, now, delta;
+        ktime_t last_update, expires, now;
         struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
         int cpu;
 
         local_irq_save(flags);
 
         cpu = smp_processor_id();
+        now = tick_nohz_start_idle(cpu);
         ts = &per_cpu(tick_cpu_sched, cpu);
 
         /*
@@ -193,19 +232,7 @@ void tick_nohz_stop_sched_tick(void)
                 }
         }
 
-        now = ktime_get();
-        /*
-         * When called from irq_exit we need to account the idle sleep time
-         * correctly.
-         */
-        if (ts->tick_stopped) {
-                delta = ktime_sub(now, ts->idle_entrytime);
-                ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-        }
-
-        ts->idle_entrytime = now;
         ts->idle_calls++;
-
         /* Read jiffies and the time when jiffies were updated last */
         do {
                 seq = read_seqbegin(&xtime_lock);
@@ -337,23 +364,22 @@ void tick_nohz_restart_sched_tick(void)
         int cpu = smp_processor_id();
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
         unsigned long ticks;
-        ktime_t now, delta;
+        ktime_t now;
 
-        if (!ts->tick_stopped)
+        local_irq_disable();
+        tick_nohz_stop_idle(cpu);
+
+        if (!ts->tick_stopped) {
+                local_irq_enable();
                 return;
+        }
 
         /* Update jiffies first */
-        now = ktime_get();
-
-        local_irq_disable();
         select_nohz_load_balancer(0);
+        now = ktime_get();
         tick_do_update_jiffies64(now);
         cpu_clear(cpu, nohz_cpu_mask);
 
-        /* Account the idle time */
-        delta = ktime_sub(now, ts->idle_entrytime);
-        ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-
         /*
          * We stopped the tick in idle. Update process times would miss the
          * time we slept as update_process_times does only a 1 tick