From fa13a5a1f25f671d084d8884be96fc48d9b68275 Mon Sep 17 00:00:00 2001
From: Paul Mackerras
Date: Fri, 9 Nov 2007 22:39:38 +0100
Subject: sched: restore deterministic CPU accounting on powerpc

Since powerpc started using CONFIG_GENERIC_CLOCKEVENTS, the deterministic
CPU accounting (CONFIG_VIRT_CPU_ACCOUNTING) has been broken on powerpc,
because we end up counting user time twice: once in timer_interrupt()
and once in update_process_times().

This fixes the problem by pulling the code in update_process_times that
updates utime and stime into a separate function called
account_process_tick.  If CONFIG_VIRT_CPU_ACCOUNTING is not defined,
there is a version of account_process_tick in kernel/timer.c that simply
accounts a whole tick to either utime or stime as before.  If
CONFIG_VIRT_CPU_ACCOUNTING is defined, then arch code gets to implement
account_process_tick.

This also lets us simplify the s390 code a bit; it means that the s390
timer interrupt can now call update_process_times even when
CONFIG_VIRT_CPU_ACCOUNTING is turned on, and can just implement a
suitable account_process_tick().

account_process_tick() now takes the task_struct * as an argument.

Tested both with and without CONFIG_VIRT_CPU_ACCOUNTING.

Signed-off-by: Paul Mackerras
Signed-off-by: Ingo Molnar
---
 arch/powerpc/kernel/process.c |  2 +-
 arch/powerpc/kernel/time.c    | 25 +------------------------
 arch/s390/kernel/time.c       |  4 ----
 arch/s390/kernel/vtime.c      |  8 +-------
 4 files changed, 3 insertions(+), 36 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b9d88374f14f..41e13f4cc6e3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -350,7 +350,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	local_irq_save(flags);
 
 	account_system_vtime(current);
-	account_process_vtime(current);
+	account_process_tick(current, 0);
 	calculate_steal_time();
 	last = _switch(old_thread, new_thread);
 
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 9eb3284deac4..a70dfb76d0a8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -259,7 +259,7 @@ void account_system_vtime(struct task_struct *tsk)
  * user and system time records.
  * Must be called with interrupts disabled.
  */
-void account_process_vtime(struct task_struct *tsk)
+void account_process_tick(struct task_struct *tsk, int user_tick)
 {
 	cputime_t utime, utimescaled;
 
@@ -274,18 +274,6 @@ void account_process_vtime(struct task_struct *tsk)
 	account_user_time_scaled(tsk, utimescaled);
 }
 
-static void account_process_time(struct pt_regs *regs)
-{
-	int cpu = smp_processor_id();
-
-	account_process_vtime(current);
-	run_local_timers();
-	if (rcu_pending(cpu))
-		rcu_check_callbacks(cpu, user_mode(regs));
-	scheduler_tick();
-	run_posix_cpu_timers(current);
-}
-
 /*
  * Stuff for accounting stolen time.
  */
@@ -375,7 +363,6 @@ static void snapshot_purr(void)
 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
 
 #define calc_cputime_factors()
-#define account_process_time(regs)	update_process_times(user_mode(regs))
 #define calculate_steal_time()		do { } while (0)
 #endif
 
@@ -599,16 +586,6 @@ void timer_interrupt(struct pt_regs * regs)
 		get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
-	/*
-	 * We cannot disable the decrementer, so in the period
-	 * between this cpu's being marked offline in cpu_online_map
-	 * and calling stop-self, it is taking timer interrupts.
-	 * Avoid calling into the scheduler rebalancing code if this
-	 * is the case.
-	 */
-	if (!cpu_is_offline(cpu))
-		account_process_time(regs);
-
 	if (evt->event_handler)
 		evt->event_handler(evt);
 	else
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index a963fe81359e..22b800ce2126 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -145,12 +145,8 @@ void account_ticks(u64 time)
 	do_timer(ticks);
 #endif
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	account_tick_vtime(current);
-#else
 	while (ticks--)
 		update_process_times(user_mode(get_irq_regs()));
-#endif
 
 	s390_do_profile();
 }
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 84ff78de6bac..c5f05b3fb2c3 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -32,7 +32,7 @@ static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-void account_tick_vtime(struct task_struct *tsk)
+void account_process_tick(struct task_struct *tsk, int user_tick)
 {
 	cputime_t cputime;
 	__u64 timer, clock;
@@ -64,12 +64,6 @@ void account_tick_vtime(struct task_struct *tsk)
 		S390_lowcore.steal_clock -= cputime << 12;
 		account_steal_time(tsk, cputime);
 	}
-
-	run_local_timers();
-	if (rcu_pending(smp_processor_id()))
-		rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
-	scheduler_tick();
-	run_posix_cpu_timers(tsk);
 }
 
 /*
-- 
cgit v1.2.2


From 4e2947f12516d13446d6ffa1d9e4fbd33b1636fa Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 9 Nov 2007 22:39:38 +0100
Subject: x86: make ipi_handler() always defined

prepare for up_smp_call_function() to ensure that the 'func' pointer is
unused. (which is related to a KVM build fix)

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/mtrr/main.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 9abbdf7562c5..3b20613325dc 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -139,13 +139,12 @@ struct set_mtrr_data {
 	mtrr_type	smp_type;
 };
 
-#ifdef CONFIG_SMP
-
 static void ipi_handler(void *info)
 /*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
     [RETURNS] Nothing.
 */
 {
+#ifdef CONFIG_SMP
 	struct set_mtrr_data *data = info;
 	unsigned long flags;
 
@@ -168,9 +167,8 @@ static void ipi_handler(void *info)
 
 	atomic_dec(&data->count);
 	local_irq_restore(flags);
-}
-
 #endif
+}
 
 static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
 	return type1 == MTRR_TYPE_UNCACHABLE ||
-- 
cgit v1.2.2


From 0492007ed9b53f6a2a2f983910d0fe7c97b09822 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 9 Nov 2007 22:39:38 +0100
Subject: x86: make nmi_cpu_busy() always defined

nmi_cpu_busy() must be available on !SMP too.

this is in preparation to a smp_call_function_mask() fix.

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/nmi_32.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index f803ed0ed1c4..600fd404e440 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -51,13 +51,13 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
 
 static int endflag __initdata = 0;
 
-#ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
  * CPUs during the test make them busy.
  */
 static __init void nmi_cpu_busy(void *data)
 {
+#ifdef CONFIG_SMP
 	local_irq_enable_in_hardirq();
 	/* Intentionally don't use cpu_relax here. This is
 	   to make sure that the performance counter really ticks,
@@ -67,8 +67,8 @@ static __init void nmi_cpu_busy(void *data)
 	   care if they get somewhat less cycles. */
 	while (endflag == 0)
 		mb();
-}
 #endif
+}
 
 static int __init check_nmi_watchdog(void)
 {
-- 
cgit v1.2.2
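
For readers following the first patch: the generic half of the change lives in
kernel/timer.c and kernel/sched.c, which fall outside the 'arch'-limited diffs
above. Below is a minimal sketch of what the commit message describes, not the
actual upstream hunk — it assumes the fallback simply charges one whole tick to
utime or stime, and that update_process_times() keeps the same housekeeping
calls that were removed from the powerpc and s390 copies above; the real code
may differ in detail.

/*
 * Sketch only (not part of the patches above): generic fallback used when
 * the architecture does not provide its own account_process_tick(), i.e.
 * when CONFIG_VIRT_CPU_ACCOUNTING is not set.  It charges a whole tick to
 * either utime or stime, as update_process_times() did before the split.
 */
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
}
#endif

/*
 * update_process_times() then keeps only the per-tick housekeeping that the
 * arch-specific helpers used to duplicate, delegating the utime/stime update
 * to account_process_tick() (either the arch version or the stub above).
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	account_process_tick(p, user_tick);
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}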