From e97c5b609880d97313b13eb71830fca62cee50c2 Mon Sep 17 00:00:00 2001
From: Jim Quinlan
Date: Thu, 6 Sep 2012 11:36:56 -0400
Subject: MIPS: Make irqflags.h functions preempt-safe for non-mipsr2 cpus

For non-MIPSr2 processors, such as the BMIPS 5000, calls to
arch_local_irq_disable() and others may be preempted, and in doing so a
stale value may be restored to c0_status. This fix disables preemption
for such processors prior to the call and enables it after the call.

Those functions that needed this fix have been "outlined" to
mips-atomic.c, as they are no longer good candidates for inlining.

This bug was observed on a BMIPS 5000, occurring once every few hours in
a continuous reboot test. It was traced to the write_lock_irq() function,
which was being invoked from release_task() in exit.c. By placing a
number of "nops" in between the mfc0/mtc0 pair in arch_local_irq_disable(),
which is called by write_lock_irq(), we were able to greatly increase the
occurrence of this bug. Conversely, applying this commit silenced the bug.

Signed-off-by: Jim Quinlan
Cc: linux-mips@linux-mips.org
Cc: David Daney
Cc: Kevin Cernekee cernekee@gmail.com
Cc: Jim Quinlan
Patchwork: https://patchwork.linux-mips.org/patch/4321/
Signed-off-by: Ralf Baechle
---
 arch/mips/lib/mips-atomic.c | 176 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 176 insertions(+)
 create mode 100644 arch/mips/lib/mips-atomic.c

(limited to 'arch/mips/lib/mips-atomic.c')
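
The race described above comes down to a non-atomic read-modify-write of the
c0_status register. The sketch below is illustrative only and is not part of
the patch: racy_irq_disable() and preempt_safe_irq_disable() are made-up names,
read_c0_status()/write_c0_status() and ST0_IE are the usual MIPS accessors from
asm/mipsregs.h, and the real code in the diff that follows manipulates more
Status bits and adds hazard barriers.

#include <linux/preempt.h>
#include <asm/mipsregs.h>	/* read_c0_status(), write_c0_status(), ST0_IE */

/* Hypothetical helper: the unprotected pattern the commit describes. */
static void racy_irq_disable(void)
{
	unsigned long status;

	status = read_c0_status();	/* mfc0: sample c0_status */
	status &= ~ST0_IE;		/* clear the interrupt-enable bit */
	/*
	 * If the task is preempted here, c0_status can change before we
	 * resume, and the write below restores a stale value.
	 */
	write_c0_status(status);	/* mtc0 */
}

/* Hypothetical helper: the same operation bracketed as in the fix. */
static void preempt_safe_irq_disable(void)
{
	preempt_disable();		/* no reschedule between read and write */
	write_c0_status(read_c0_status() & ~ST0_IE);
	preempt_enable();
}
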
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
new file mode 100644
index 000000000000..e091430dbeb1
--- /dev/null
+++ b/arch/mips/lib/mips-atomic.c
@@ -0,0 +1,176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
+ * Copyright (C) 1996 by Paul M. Antoine
+ * Copyright (C) 1999 Silicon Graphics
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include
+#include
+#include
+#include
+#include
+
+#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+
+/*
+ * For cli() we have to insert nops to make sure that the new value
+ * has actually arrived in the status register before the end of this
+ * macro.
+ * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
+ * no nops at all.
+ */
+/*
+ * For TX49, operating only IE bit is not enough.
+ *
+ * If mfc0 $12 follows store and the mfc0 is last instruction of a
+ * page and fetching the next instruction causes TLB miss, the result
+ * of the mfc0 might wrongly contain EXL bit.
+ *
+ * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
+ *
+ * Workaround: mask EXL bit of the result or place a nop before mfc0.
+ */
+__asm__(
+	"	.macro	arch_local_irq_disable\n"
+	"	.set	push\n"
+	"	.set	noat\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1\n"
+	"	ori	$1, 0x400\n"
+	"	.set	noreorder\n"
+	"	mtc0	$1, $2, 1\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1,$12\n"
+	"	ori	$1,0x1f\n"
+	"	xori	$1,0x1f\n"
+	"	.set	noreorder\n"
+	"	mtc0	$1,$12\n"
+#endif
+	"	irq_disable_hazard\n"
+	"	.set	pop\n"
+	"	.endm\n");
+
+void arch_local_irq_disable(void)
+{
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_disable"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_disable);
+
+
+__asm__(
+	"	.macro	arch_local_irq_save result\n"
+	"	.set	push\n"
+	"	.set	reorder\n"
+	"	.set	noat\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	\\result, $2, 1\n"
+	"	ori	$1, \\result, 0x400\n"
+	"	.set	noreorder\n"
+	"	mtc0	$1, $2, 1\n"
+	"	andi	\\result, \\result, 0x400\n"
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	\\result, $12\n"
+	"	ori	$1, \\result, 0x1f\n"
+	"	xori	$1, 0x1f\n"
+	"	.set	noreorder\n"
+	"	mtc0	$1, $12\n"
+#endif
+	"	irq_disable_hazard\n"
+	"	.set	pop\n"
+	"	.endm\n");
+
+unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags;
+	preempt_disable();
+	asm volatile("arch_local_irq_save\t%0"
+		     : "=r" (flags)
+		     : /* no inputs */
+		     : "memory");
+	preempt_enable();
+	return flags;
+}
+EXPORT_SYMBOL(arch_local_irq_save);
+
+
+__asm__(
+	"	.macro	arch_local_irq_restore flags\n"
+	"	.set	push\n"
+	"	.set	noreorder\n"
+	"	.set	noat\n"
+#ifdef CONFIG_MIPS_MT_SMTC
+	"	mfc0	$1, $2, 1\n"
+	"	andi	\\flags, 0x400\n"
+	"	ori	$1, 0x400\n"
+	"	xori	$1, 0x400\n"
+	"	or	\\flags, $1\n"
+	"	mtc0	\\flags, $2, 1\n"
+#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+	/* see irqflags.h for inline function */
+#elif defined(CONFIG_CPU_MIPSR2)
+	/* see irqflags.h for inline function */
+#else
+	"	mfc0	$1, $12\n"
+	"	andi	\\flags, 1\n"
+	"	ori	$1, 0x1f\n"
+	"	xori	$1, 0x1f\n"
+	"	or	\\flags, $1\n"
+	"	mtc0	\\flags, $12\n"
+#endif
+	"	irq_disable_hazard\n"
+	"	.set	pop\n"
+	"	.endm\n");
+
+void arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC kernel needs to do a software replay of queued
+	 * IPIs, at the cost of branch and call overhead on each
+	 * local_irq_restore()
+	 */
+	if (unlikely(!(flags & 0x0400)))
+		smtc_ipi_replay();
+#endif
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
+
+
+void __arch_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	preempt_disable();
+	__asm__ __volatile__(
+		"arch_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+	preempt_enable();
+}
+EXPORT_SYMBOL(__arch_local_irq_restore);
+
+#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
--
cgit v1.2.2
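
For context on how callers reach these newly out-of-line primitives, here is a
minimal caller-side sketch; example_critical_section() is a made-up name, and
local_irq_save()/local_irq_restore() are the generic wrappers from
linux/irqflags.h that resolve to the arch_local_*() functions above (hence the
EXPORT_SYMBOL()s, so modules keep linking). write_lock_irq(), where the bug was
first observed, reaches arch_local_irq_disable() through the same wrapper layer.

#include <linux/irqflags.h>

/* Hypothetical caller: the usual pattern behind local_irq_save()/restore(). */
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* ends up in arch_local_irq_save() on non-R2 CPUs */
	/* ... touch per-CPU state that a local interrupt must not see half-done ... */
	local_irq_restore(flags);	/* ends up in arch_local_irq_restore() */
}
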
From f93a1a00f2bd550f86fd1a9f83c493755aecd15f Mon Sep 17 00:00:00 2001
From: Al Cooper
Date: Thu, 15 Nov 2012 18:16:14 -0500
Subject: MIPS: Fix crash that occurs when function tracing is enabled

A recent patch changed some IRQ routines from inlines to functions.
These routines are called by the tracer code. Now that they are
functions, if they are compiled for function tracing they will call
the tracer and crash the system due to infinite recursion.

The fix disables tracing in these functions by using "notrace" in
the function definitions.

Signed-off-by: Al Cooper
Reviewed-by: David Daney
Patchwork: https://patchwork.linux-mips.org/patch/4564/
Signed-off-by: Ralf Baechle
---
 arch/mips/lib/mips-atomic.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/mips/lib/mips-atomic.c')

diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index e091430dbeb1..cd160be3ce4d 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -56,7 +56,7 @@ __asm__(
 	"	.set	pop\n"
 	"	.endm\n");
 
-void arch_local_irq_disable(void)
+notrace void arch_local_irq_disable(void)
 {
 	preempt_disable();
 	__asm__ __volatile__(
@@ -93,7 +93,7 @@ __asm__(
 	"	.set	pop\n"
 	"	.endm\n");
 
-unsigned long arch_local_irq_save(void)
+notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
 	preempt_disable();
@@ -135,7 +135,7 @@ __asm__(
 	"	.set	pop\n"
 	"	.endm\n");
 
-void arch_local_irq_restore(unsigned long flags)
+notrace void arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
@@ -159,7 +159,7 @@ void arch_local_irq_restore(unsigned long flags)
 EXPORT_SYMBOL(arch_local_irq_restore);
 
 
-void __arch_local_irq_restore(unsigned long flags)
+notrace void __arch_local_irq_restore(unsigned long flags)
 {
 	unsigned long __tmp1;
 
--
cgit v1.2.2
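
The recursion the second commit fixes can be pictured as follows. This is a
rough sketch, not the real ftrace entry code: trace_safe_helper() is a made-up
name, and the call chain in the comment is a simplification of what happens
when a traced function is itself needed by the tracer.

#include <linux/compiler.h>	/* notrace */
#include <linux/irqflags.h>

/*
 * Simplified picture of the crash:
 *
 *   arch_local_irq_save()
 *     -> function-entry tracer hook
 *        -> tracer masks interrupts for its own bookkeeping
 *           -> arch_local_irq_save()   (re-enters the hook, recursing
 *                                       until the kernel stack overflows)
 *
 * Marking a function notrace keeps the compiler from instrumenting it, so
 * no tracer hook is emitted for it; the patch applies exactly that
 * annotation to the IRQ primitives themselves.
 */
static notrace void trace_safe_helper(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();
	/* ... work that may be reached from tracing code ... */
	arch_local_irq_restore(flags);
}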