author | Paul Mackerras <paulus@samba.org> | 2007-11-09 16:39:38 -0500
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2007-11-09 16:39:38 -0500
commit | fa13a5a1f25f671d084d8884be96fc48d9b68275 (patch) |
tree | 97dae05bb5baef806a6dcbeed8b7eb5bdc61e4ae /arch/powerpc |
parent | 9a41785cc43d88397f787a651ed7286a33f8462f (diff) |
sched: restore deterministic CPU accounting on powerpc
Since powerpc started using CONFIG_GENERIC_CLOCKEVENTS, the
deterministic CPU accounting (CONFIG_VIRT_CPU_ACCOUNTING) has been
broken on powerpc, because we end up counting user time twice: once in
timer_interrupt() and once in update_process_times().
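With CONFIG_GENERIC_CLOCKEVENTS the decrementer interrupt also feeds the generic tick code, which ends up in update_process_times(). A condensed, illustrative view of the pre-patch path (not the literal source; identifiers come from the full timer_interrupt() shown in the diff below) is:

```c
/* Condensed sketch of the pre-patch powerpc tick path under
 * CONFIG_VIRT_CPU_ACCOUNTING, showing where time gets charged twice. */
void timer_interrupt(struct pt_regs *regs)
{
	/* ... */
	if (!cpu_is_offline(cpu))
		account_process_time(regs);	/* precise accounting via
						 * account_process_vtime(): 1st charge */

	if (evt->event_handler)
		evt->event_handler(evt);	/* generic clockevents handler, which
						 * eventually calls update_process_times()
						 * and charges a whole tick again: 2nd charge */
	/* ... */
}
```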
This fixes the problem by pulling the code in update_process_times
that updates utime and stime into a separate function called
account_process_tick. If CONFIG_VIRT_CPU_ACCOUNTING is not defined,
there is a version of account_process_tick in kernel/timer.c that
simply accounts a whole tick to either utime or stime as before. If
CONFIG_VIRT_CPU_ACCOUNTING is defined, then arch code gets to
implement account_process_tick.
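kernel/timer.c is outside the diffstat shown here, but the shape of the change described above is roughly the following sketch, reconstructed from the description; exact helper names, scaled-time bookkeeping, and surrounding details may differ:

```c
/* kernel/timer.c (sketch): update_process_times() delegates the
 * utime/stime update to account_process_tick(). */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	account_process_tick(p, user_tick);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}

#ifndef CONFIG_VIRT_CPU_ACCOUNTING
/* Default: charge the whole tick to utime or stime, as before. */
void account_process_tick(struct task_struct *p, int user_tick)
{
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
}
#endif
```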
This also lets us simplify the s390 code a bit; it means that the s390
timer interrupt can now call update_process_times even when
CONFIG_VIRT_CPU_ACCOUNTING is turned on, and can just implement a
suitable account_process_tick().
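On such an architecture the hardware (or hypervisor) already tracks per-task time precisely, so account_process_tick() can ignore the user_tick hint and simply flush whatever has accumulated since the last update. A purely hypothetical sketch (not the actual s390 code; arch_pending_user_time() is an invented helper):

```c
/* Hypothetical CONFIG_VIRT_CPU_ACCOUNTING implementation: the per-task
 * counters are authoritative, so user_tick is ignored.
 * arch_pending_user_time() is an invented helper, not a real API. */
void account_process_tick(struct task_struct *tsk, int user_tick)
{
	cputime_t utime = arch_pending_user_time(tsk);

	if (utime)
		account_user_time(tsk, utime);
}
```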
account_process_tick() now takes the task_struct * as an argument.
Tested both with and without CONFIG_VIRT_CPU_ACCOUNTING.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/kernel/process.c | 2
-rw-r--r-- | arch/powerpc/kernel/time.c | 25
2 files changed, 2 insertions, 25 deletions
```diff
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b9d88374f14f..41e13f4cc6e3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -350,7 +350,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	local_irq_save(flags);
 
 	account_system_vtime(current);
-	account_process_vtime(current);
+	account_process_tick(current, 0);
 	calculate_steal_time();
 
 	last = _switch(old_thread, new_thread);
```
```diff
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 9eb3284deac4..a70dfb76d0a8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -259,7 +259,7 @@ void account_system_vtime(struct task_struct *tsk)
  * user and system time records.
  * Must be called with interrupts disabled.
  */
-void account_process_vtime(struct task_struct *tsk)
+void account_process_tick(struct task_struct *tsk, int user_tick)
 {
 	cputime_t utime, utimescaled;
 
@@ -274,18 +274,6 @@ void account_process_vtime(struct task_struct *tsk)
 	account_user_time_scaled(tsk, utimescaled);
 }
 
-static void account_process_time(struct pt_regs *regs)
-{
-	int cpu = smp_processor_id();
-
-	account_process_vtime(current);
-	run_local_timers();
-	if (rcu_pending(cpu))
-		rcu_check_callbacks(cpu, user_mode(regs));
-	scheduler_tick();
-	run_posix_cpu_timers(current);
-}
-
 /*
  * Stuff for accounting stolen time.
  */
@@ -375,7 +363,6 @@ static void snapshot_purr(void)
 
 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
 #define calc_cputime_factors()
-#define account_process_time(regs)	update_process_times(user_mode(regs))
 #define calculate_steal_time()		do { } while (0)
 #endif
 
@@ -599,16 +586,6 @@ void timer_interrupt(struct pt_regs * regs)
 	get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-	/*
-	 * We cannot disable the decrementer, so in the period
-	 * between this cpu's being marked offline in cpu_online_map
-	 * and calling stop-self, it is taking timer interrupts.
-	 * Avoid calling into the scheduler rebalancing code if this
-	 * is the case.
-	 */
-	if (!cpu_is_offline(cpu))
-		account_process_time(regs);
-
 	if (evt->event_handler)
 		evt->event_handler(evt);
 	else
```