path: root/kernel/time
author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-12-31 09:11:38 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-12-31 09:11:46 -0500
commit    79741dd35713ff4f6fd0eafd59fa94e8a4ba922d (patch)
tree      73c6b503fbd274cb3fcca7a0a68c6f636e3a53ad /kernel/time
parent    457533a7d3402d1d91fbc125c8bd1bd16dcd3cd4 (diff)
[PATCH] idle cputime accounting
The cpu time spent by the idle process actually doing something is currently accounted as idle time. This is plain wrong; the architectures that support VIRT_CPU_ACCOUNTING=y can do better and distinguish between the time spent doing nothing and the time spent by idle doing work. The first is accounted with account_idle_time and the second with account_system_time. The architectures that use the account_xxx_time interface directly, and not the account_xxx_ticks interface, now need to do the check for the idle process in their arch code. In particular, to improve the system vs. true idle time accounting the arch code needs to measure the true idle time instead of just testing for the idle process. To improve the tick-based accounting as well, we would need an architecture primitive that can tell us whether the pt_regs of the interrupted context point to the magic instruction that halts the cpu.

In addition, idle time is no longer added to the stime of the idle process. This field now contains the system time of the idle process, as it should. On systems without VIRT_CPU_ACCOUNTING this will always be zero, as every tick that occurs while idle is running is accounted as idle time.

This patch contains the common code changes needed to distinguish idle system time from true idle time. The architectures with support for VIRT_CPU_ACCOUNTING need some changes to exploit this.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
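For illustration, here is a minimal sketch (not part of this patch) of how an architecture with VIRT_CPU_ACCOUNTING=y might apply this split at an accounting point. The vtime_delta() helper and the cpu_was_halted() check are hypothetical placeholders for arch-specific measurement; only account_system_time() and account_idle_time() are the common-code interfaces named above.

#include <linux/hardirq.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>

/* Hypothetical arch hook, shown only to illustrate the idle/system split. */
static void arch_vtime_account(struct task_struct *tsk)
{
        /* vtime_delta(): hypothetical, cpu time elapsed since the last sample */
        cputime_t delta = vtime_delta();

        if (tsk->pid == 0 && cpu_was_halted())
                /* idle task (pid 0) and the cpu really did nothing: true idle time */
                account_idle_time(delta);
        else
                /* idle doing work is charged as system time, like any other task */
                account_system_time(tsk, HARDIRQ_OFFSET, delta, delta);
}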
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/tick-sched.c | 13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1f2fce2479fe..611fa4c0baab 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -419,8 +419,9 @@ void tick_nohz_restart_sched_tick(void)
 {
        int cpu = smp_processor_id();
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
        unsigned long ticks;
-       cputime_t cputime;
+#endif
        ktime_t now;
 
        local_irq_disable();
@@ -442,6 +443,7 @@ void tick_nohz_restart_sched_tick(void)
        tick_do_update_jiffies64(now);
        cpu_clear(cpu, nohz_cpu_mask);
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
        /*
         * We stopped the tick in idle. Update process times would miss the
         * time we slept as update_process_times does only a 1 tick
@@ -451,12 +453,9 @@ void tick_nohz_restart_sched_tick(void)
        /*
         * We might be one off. Do not randomly account a huge number of ticks!
         */
-       if (ticks && ticks < LONG_MAX) {
-               add_preempt_count(HARDIRQ_OFFSET);
-               cputime = jiffies_to_cputime(ticks);
-               account_system_time(current, HARDIRQ_OFFSET, cputime, cputime);
-               sub_preempt_count(HARDIRQ_OFFSET);
-       }
+       if (ticks && ticks < LONG_MAX)
+               account_idle_ticks(ticks);
+#endif
 
        touch_softlockup_watchdog();
        /*
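The diffstat is limited to kernel/time, so the account_idle_ticks() helper that the hunk above now calls is not visible here; it is part of the common accounting code this patch introduces. Conceptually it reduces to converting the skipped ticks to cputime and feeding them to account_idle_time(), roughly as in the following sketch (not the verbatim definition):

/* Sketch of the common-code helper used above: account several idle ticks. */
void account_idle_ticks(unsigned long ticks)
{
        account_idle_time(jiffies_to_cputime(ticks));
}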